python_code (stringlengths 0-1.02M) | repo_name (stringlengths 9-48) | file_path (stringlengths 5-114)
---|---|---|
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for eliminating boilerplate code to handle abstract streams with
the CPU device.
"""
from contextlib import contextmanager
from typing import Generator, List, Union, cast
import torch
__all__: List[str] = []
class CPUStreamType:
pass
# The placeholder used in place of a stream for the CPU device instead of a CUDA stream.
CPUStream = CPUStreamType()
# It represents both CUDA streams and the CPU stream.
AbstractStream = Union[torch.cuda.Stream, CPUStreamType]
def new_stream(device: torch.device) -> AbstractStream:
"""Creates a new stream for either CPU or CUDA device."""
if device.type != "cuda":
return CPUStream
return torch.cuda.Stream(device)
def current_stream(device: torch.device) -> AbstractStream:
""":func:`torch.cuda.current_stream` for either CPU or CUDA device."""
if device.type != "cuda":
return CPUStream
return torch.cuda.current_stream(device)
def default_stream(device: torch.device) -> AbstractStream:
""":func:`torch.cuda.default_stream` for either CPU or CUDA device."""
if device.type != "cuda":
return CPUStream
return torch.cuda.default_stream(device)
@contextmanager
def use_device(device: torch.device) -> Generator[None, None, None]:
""":func:`torch.cuda.device` for either CPU or CUDA device."""
if device.type != "cuda":
yield
return
with torch.cuda.device(device):
yield
@contextmanager
def use_stream(stream: AbstractStream) -> Generator[None, None, None]:
""":func:`torch.cuda.stream` for either CPU or CUDA stream."""
if not is_cuda(stream):
yield
return
with torch.cuda.stream(as_cuda(stream)):
yield
def get_device(stream: AbstractStream) -> torch.device:
"""Gets the device from CPU or CUDA stream."""
if is_cuda(stream):
return as_cuda(stream).device
return torch.device("cpu")
def wait_stream(source: AbstractStream, target: AbstractStream) -> None:
""":meth:`torch.cuda.Stream.wait_stream` for either CPU or CUDA stream. It
makes the source stream wait until the target stream completes the work queued on it.
"""
if is_cuda(target):
if is_cuda(source):
# A CUDA stream waits for another CUDA stream.
as_cuda(source).wait_stream(as_cuda(target))
else:
# The CPU waits for a CUDA stream.
as_cuda(target).synchronize()
# If the target is CPU, synchronization is not required.
def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None:
""":meth:`torch.Tensor.record_stream` for either CPU or CUDA stream."""
if is_cuda(stream):
# NOTE(sublee): record_stream() on a shifted view tensor throws
# RuntimeError in PyTorch 1.1.0, and does nothing in 1.2.0. To safely
# protect the tensor against unexpected reallocation, here we use a
# temporary tensor associated with the same storage without shifting as
# a workaround.
#
# Issue: https://github.com/pytorch/pytorch/issues/27366
#
tensor = tensor.new_empty([0]).set_(tensor.storage())
# Typechecking: torch.cuda.Stream is incompatible with torch._C.Stream
tensor.record_stream(as_cuda(stream)) # type: ignore[arg-type]
def is_cuda(stream: AbstractStream) -> bool:
"""Returns ``True`` if the given stream is a valid CUDA stream."""
return stream is not CPUStream
def as_cuda(stream: AbstractStream) -> torch.cuda.Stream:
"""Casts the given stream as :class:`torch.cuda.Stream`."""
return cast(torch.cuda.Stream, stream)
| pytorch-master | torch/distributed/pipeline/sync/stream.py |
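The helpers above keep stream handling device-agnostic. Below is a minimal usage sketch, assuming a single device and a dummy workload chosen purely for illustration:

import torch
from torch.distributed.pipeline.sync.stream import (
    current_stream, new_stream, record_stream, use_stream, wait_stream)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
copy_stream = new_stream(device)   # the CPUStream placeholder on a CPU-only machine

x = torch.ones(4, device=device)
with use_stream(copy_stream):      # no-op context when copy_stream is the CPU stream
    y = x * 2                      # queued on copy_stream when running on CUDA

# Make the current stream wait for the copy stream, then tell the caching
# allocator that the current stream also uses `y`.
wait_stream(current_stream(device), copy_stream)
record_stream(y, current_stream(device))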
from torch import nn
from typing import List, Optional
def partition_model(
module: nn.Sequential,
balance: List[int],
devices: Optional[List[int]] = None):
"""
Given an :class:`nn.Sequential <torch.nn.Sequential>` module, partitions
the model across multiple GPU devices according to the provided ``balance``
and ``devices``.
Args:
module (:class:`nn.Sequential <torch.nn.Sequential>`):
Sequential model representing the pipe.
balance (List[int]):
List indicating the number of layers in each partition.
devices (List[int], optional):
List indicating the device to use for each partition. Defaults to
``range(len(balance))``
"""
device_idx = 0
pipe_idx = 0
balanced_pipe = []
for num_layers in balance:
layers = []
for i in range(num_layers):
layers.append(module[pipe_idx])
pipe_idx += 1
device = device_idx if devices is None else devices[device_idx]
balanced_pipe.append(nn.Sequential(*layers).to(device))
device_idx += 1
return nn.Sequential(*balanced_pipe)
| pytorch-master | torch/distributed/pipeline/sync/utils.py |
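A brief usage sketch of ``partition_model``; the layer sizes, the 2/2 balance, and the assumption of two CUDA devices are illustrative only:

import torch.nn as nn
from torch.distributed.pipeline.sync.utils import partition_model

model = nn.Sequential(
    nn.Linear(16, 16), nn.ReLU(),
    nn.Linear(16, 8), nn.ReLU(),
)
# Place the first two layers on cuda:0 and the last two on cuda:1.
balanced = partition_model(model, balance=[2, 2], devices=[0, 1])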
# -*- coding: utf-8 -*-
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""The pipeline parallelism of Pipe."""
from queue import Queue
from types import TracebackType
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Type, Union, cast, Sequence
import torch
from torch import Tensor, nn
from torch.autograd.profiler import record_function
from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers
__all__: List[str] = []
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
# Queue is generic only in stubs.
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
InQueue = Queue[Optional["Task"]]
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
InQueue = Queue
OutQueue = Queue
def _depend(fork_from: Batch, join_to: Batch) -> None:
fork_from_idx = fork_from.find_tensor_idx()
join_to_idx = join_to.find_tensor_idx()
fork_from[fork_from_idx], phony = fork(fork_from[fork_from_idx])
join_to[join_to_idx] = join(join_to[join_to_idx], phony)
def _copy(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None:
batch[:] = Copy.apply(prev_stream, next_stream, *batch)
# Gradients are only supported for float Tensors.
batch[:] = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch])
def _wait(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None:
batch[:] = Wait.apply(prev_stream, next_stream, *batch)
# Gradients are only supported for float Tensors.
batch[:] = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch])
def _clock_cycles(m: int, n: int) -> Iterable[List[Tuple[int, int]]]:
"""Generates schedules for each clock cycle."""
# m: number of micro-batches
# n: number of partitions
# i: index of micro-batch
# j: index of partition
# k: clock number
#
# k (i,j) (i,j) (i,j)
# - ----- ----- -----
# 0 (0,0)
# 1 (1,0) (0,1)
# 2 (2,0) (1,1) (0,2)
# 3 (2,1) (1,2)
# 4 (2,2)
for k in range(m + n - 1):
yield [(k - j, j) for j in range(max(1 + k - m, 0), min(1 + k, n))]
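# For example, with m=3 micro-batches and n=3 partitions the generator yields
# the diagonal schedule drawn above (an illustrative check, not part of the
# original module):
#
#   list(_clock_cycles(3, 3)) == [
#       [(0, 0)],
#       [(1, 0), (0, 1)],
#       [(2, 0), (1, 1), (0, 2)],
#       [(2, 1), (1, 2)],
#       [(2, 2)],
#   ]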
class Pipeline:
"""The pipeline parallelism for Pipe."""
def __init__(
self,
partitions: List[nn.Sequential],
devices: List[torch.device],
copy_streams: List[List[AbstractStream]],
skip_layout: SkipLayout,
checkpoint_stop: int,
) -> None:
self.partitions = partitions
self.devices = devices
self.copy_streams = copy_streams
self.skip_layout = skip_layout
self.checkpoint_stop = checkpoint_stop
(self.in_queues, self.out_queues) = create_workers(devices)
def run(self, batches: List[Batch]) -> None:
"""Runs pipeline parallelism.
It modifies the given batches in place.
"""
partitions = self.partitions
devices = self.devices
skip_layout = self.skip_layout
m = len(batches)
n = len(partitions)
skip_trackers = [SkipTrackerThroughPotals(skip_layout) for _ in batches]
for schedule in _clock_cycles(m, n):
self.fence(batches, schedule, skip_trackers)
self.compute(batches, schedule, skip_trackers)
def fence(
self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals],
) -> None:
"""Copies micro-batches after computation for the previous
micro-batches.
"""
copy_streams = self.copy_streams
skip_layout = self.skip_layout
for i, j in schedule:
# Ensure that batches[i-1] is executed after batches[i] in
# backpropagation by an explicit dependency.
if i != 0 and j != 0:
_depend(batches[i - 1], batches[i])
next_stream = copy_streams[j][i]
for prev_j, ns, name in skip_layout.copy_policy(j):
prev_stream = copy_streams[prev_j][i]
skip_trackers[i].copy(batches[i], prev_stream, next_stream, ns, name)
if j != 0:
prev_stream = copy_streams[j - 1][i]
_copy(batches[i], prev_stream, next_stream)
def compute(
self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals],
) -> None:
"""Runs tasks with synchronization to copy streams."""
partitions = self.partitions
devices = self.devices
copy_streams = self.copy_streams
checkpoint_stop = self.checkpoint_stop
# Disable checkpointing if in eval mode.
if not self.partitions[0].training:
checkpoint_stop = 0
n = len(partitions)
streams = [current_stream(d) for d in devices]
exc_info: Optional[ExcInfo] = None
# With checkpointing, the autograd graph looks like this diagram:
# ┌─────┸──────┐
# │    Copy    │
# └─────┰──────┘   (fence)
# ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
#       ┃          (compute)
# ┌─────┸──────┐
# │    Wait    │ [1] Synchronize the current stream with the copy stream.
# └─────┰──────┘
# ┌─────┸──────┐
# │ Checkpoint │ [2] Compute a partition within checkpointing.
# └─────┰──────┘
# ┌─────┸──────┐
# │    Wait    │ [3] Synchronize the copy stream with the current stream.
# └─────┰──────┘
#       ┠ ─ ─ ─ ┐
#       ┃ ┌─────┴─────┐
#       ┃ │ Recompute │ [4] Schedule the recomputation at backpropagation.
#       ┃ └─────┬─────┘
#       ┠ ─ ─ ─ ┘
#       ┃
# ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
# ┌─────┸──────┐   (fence)
# │    Copy    │
# └─────┰──────┘
for i, j in schedule:
batch = batches[i]
partition = partitions[j]
# Synchronize with the copied input. ([1] in the diagram)
if j != 0:
_wait(batch, copy_streams[j][i], streams[j])
# Determine whether checkpointing or not.
checkpoint = i < checkpoint_stop
if checkpoint:
def function(
*inputs,
partition: nn.Module = partition,
skip_tracker: SkipTrackerThroughPotals = skip_trackers[i],
chunk_id: int = i,
part_id: int = j,
) -> TensorOrTensors:
with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
return partition(*inputs)
chk = Checkpointing(function, batch) # type: ignore[arg-type]
task = Task(streams[j], compute=chk.checkpoint, finalize=chk.recompute)
del function, chk
else:
def compute(
batch: Batch = batch,
partition: nn.Module = partition,
skip_tracker: SkipTrackerThroughPotals = skip_trackers[i],
chunk_id: int = i,
part_id: int = j,
) -> Batch:
with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
return batch.call(partition)
task = Task(streams[j], compute=compute, finalize=None)
del compute
# Compute tasks in parallel. ([2] in the diagram)
self.in_queues[j].put(task)
for i, j in schedule:
ok, payload = self.out_queues[j].get()
# Hold the first exception.
if exc_info is not None:
continue
elif not ok:
exc_info = cast(ExcInfo, payload)
continue
task, batch = cast(Tuple[Task, Batch], payload)
# The copy stream synchronizes to copy the output. ([3] in the
# diagram)
if j != n - 1:
_wait(batch, streams[j], copy_streams[j][i])
# Finalize tasks. If checkpointing is enabled, here the
# recomputation is scheduled at backpropagation. ([4] in the
# diagram)
with use_device(devices[j]):
task.finalize(batch)
batches[i] = batch
# Fail at the first exception.
if exc_info is not None:
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
| pytorch-master | torch/distributed/pipeline/sync/pipeline.py |
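A condensed, CPU-only sketch of how ``Pipe`` drives ``Pipeline`` internally. The direct imports of these internal modules, the toy partitions, and the disabled checkpointing are assumptions for illustration, not a supported public API:

import torch
from torch import nn
from torch.distributed.pipeline.sync import microbatch
from torch.distributed.pipeline.sync.pipeline import Pipeline
from torch.distributed.pipeline.sync.skip.layout import inspect_skip_layout
from torch.distributed.pipeline.sync.stream import new_stream

chunks = 4
partitions = [nn.Sequential(nn.Linear(8, 8)), nn.Sequential(nn.Linear(8, 4))]
devices = [torch.device("cpu"), torch.device("cpu")]
copy_streams = [[new_stream(d) for _ in range(chunks)] for d in devices]
skip_layout = inspect_skip_layout(partitions)

# checkpoint_stop=0 disables checkpointing entirely in this sketch.
pipeline = Pipeline(partitions, devices, copy_streams, skip_layout, checkpoint_stop=0)

batches = microbatch.scatter(torch.randn(8, 8), chunks=chunks)
pipeline.run(batches)                  # executes the clock-cycle schedule above
output = microbatch.gather(batches)    # mini-batch of shape (8, 4)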
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Manipulation of micro-batches."""
import typing
from typing import Any, Callable, List, Union, cast, Sequence
import torch
from torch import Tensor
import torch.cuda.comm
__all__: List[str] = []
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
Function = Callable[[TensorOrTensors], Union[List[Any], Tensor]]
class NoChunk(object):
"""
Wrapper for a Tensor in :meth:`Pipe.forward` indicating that the tensor
should not be chunked on the batch dimension and instead be replicated
as-is across all micro-batches. This is useful for tensors which might
not have any 'batch' semantics for the model.
"""
def __init__(self, inp: Tensor):
if not torch.is_tensor(inp):
raise TypeError(f'NoChunk only supported for tensors, found: {inp}')
self._tensor = inp
@property
def tensor(self):
return self._tensor
class Batch:
"""
An abstraction representing a microbatch in the pipeline.
"""
def __init__(self, values: Union[List[Any], Tensor]) -> None:
self._values = values
self.atomic = torch.is_tensor(values)
# Verify there is at least one tensor.
if not self.atomic:
if not any(torch.is_tensor(value) for value in self._values):
raise TypeError(f'No tensors found in batch: {self._values}')
@property
def tensor(self) -> Tensor:
"""Retrieves the underlying tensor."""
if not self.atomic:
raise AttributeError("not atomic batch")
return cast(Tensor, self._values)
@property
def values(self):
"""Retreives the underlying values for the batch"""
return self._values
def find_tensor_idx(self):
"""
Retrieves the index of first tensor found.
"""
if self.atomic:
return 0
for i, value in enumerate(self._values):
if torch.is_tensor(value):
return i
raise TypeError("No tensor found!")
def get_device(self):
"""
Retrieves the device for this microbatch.
"""
if self.atomic:
return self._values.device # type: ignore[union-attr]
for value in self._values:
if torch.is_tensor(value):
return value.device
def call(self, function: Function) -> "Batch":
"""Calls a function on the microbatch. It also wraps
the output with :class:`Batch`.
"""
if self.atomic:
return Batch(function(self._values))
else:
return Batch(function(*self._values))
def __repr__(self) -> str:
return f"Batch[atomic={self.atomic!r}]({self._values!r})"
def __iter__(self):
if self.atomic:
yield self._values
else:
yield from self._values
def __len__(self) -> int:
return 1 if self.atomic else len(self._values)
def __getitem__(self, index: int):
if not self.atomic:
return self._values[index]
if index != 0:
raise IndexError("atomic batch allows index 0 only")
return self._values
# NOTE(sublee): pyflakes can't detect "overload" instead of "typing.overload".
@typing.overload
def __setitem__(self, index: int, value: Tensor) -> None:
...
@typing.overload
def __setitem__(self, index: slice, value: Tensors) -> None:
...
def __setitem__(self, index: Union[int, slice], value) -> None:
if isinstance(index, int):
self._setitem_by_index(index, value)
else:
self._setitem_by_slice(index, value)
def _setitem_by_index(self, index: int, value) -> None:
if not self.atomic:
i = index
self._values = self._values[:i] + (value,) + self._values[i + 1 :] # type: ignore[operator]
return
if index != 0:
raise IndexError("atomic batch allows index 0 only")
self._values = value
def _setitem_by_slice(self, index: slice, value) -> None:
if not (index.start is index.stop is index.step is None):
raise NotImplementedError("only slice [:] supported")
if not self.atomic:
self._values = value
return
if len(value) != 1:
raise IndexError("atomic batch cannot be replaced with multiple tensors")
self._values = value[0]
def check(first_device, *inputs) -> None:
"""
Checks whether the input contains at least one tensor and each tensor is
on the same device as the first partition.
Raises:
TypeError: input does not contain at least one tensor
ValueError: a tensor is not on the same device as the first partition
"""
if not any(torch.is_tensor(input) for input in inputs):
raise TypeError(f'inputs do not have any tensors: {inputs}')
if any(torch.is_tensor(input) and input.device != first_device for input in inputs):
raise ValueError('All inputs should be on the same device as the first partition')
def scatter(*inputs, chunks: int) -> List[Batch]:
"""Splits an input mini-batch into multiple micro-batches."""
if len(inputs) == 1 and isinstance(inputs[0], Tensor):
return [Batch(x) for x in inputs[0].chunk(chunks)]
batches: List[Any] = [[] for _ in range(chunks)]
# Actual number of chunks produced
num_chunks = -1
for input in inputs:
if torch.is_tensor(input):
# Chunk only tensors.
tensors = input.chunk(chunks)
# Validate number of chunks equal across all inputs.
if num_chunks != -1 and num_chunks != len(tensors):
raise RuntimeError(f'Found different number of chunks produced for inputs: {num_chunks} and {len(tensors)}')
num_chunks = len(tensors)
for i, tensor in enumerate(tensors):
batches[i].append(tensor)
else:
# Replicate non-tensors or tensors wrapped with 'NoChunk'.
for i in range(chunks):
if isinstance(input, NoChunk):
# Extract the tensor out.
batches[i].append(input.tensor)
else:
batches[i].append(input)
# Truncate to actual number of chunks
batches = batches[:num_chunks]
return [Batch(x) for x in batches]
def gather(outputs: List[Batch]):
"""Concatenates output micro-batches into a mini-batch."""
output: Any
if outputs[0].atomic:
tensors = tuple(b.tensor for b in outputs)
output = torch.cat(tensors)
else:
output_buf: List[Any] = []
for i in range(len(outputs[0])):
output_type = type(outputs[0][i])
current_outputs = []
for batch in outputs:
if output_type != type(batch[i]):
raise TypeError(f'Types for microbatch outputs do not match, found: {output_type} and {type(batch[i])}')
current_outputs.append(batch[i])
if torch.is_tensor(outputs[0][i]):
output_buf.append(torch.cat(current_outputs))
else:
output_buf.append(current_outputs)
output = tuple(output_buf)
return output
| pytorch-master | torch/distributed/pipeline/sync/microbatch.py |
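A small round-trip sketch of ``scatter``, ``gather``, and ``NoChunk``; the import path and tensor sizes are illustrative assumptions:

import torch
from torch.distributed.pipeline.sync import microbatch

x = torch.randn(8, 4)
flag = torch.tensor([1])

# The plain tensor is chunked along the batch dimension; the NoChunk-wrapped
# tensor is replicated as-is into every micro-batch.
batches = microbatch.scatter(x, microbatch.NoChunk(flag), chunks=4)
assert len(batches) == 4 and batches[0][0].shape == (2, 4)

# Chunking a lone tensor and gathering restores the original mini-batch.
atomic = microbatch.scatter(x, chunks=4)
assert torch.equal(microbatch.gather(atomic), x)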
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Arbitrary dependency between two autograd lanes."""
from typing import List, Tuple
import torch
from torch import Tensor
from .phony import get_phony
__all__: List[str] = []
def fork(input: Tensor) -> Tuple[Tensor, Tensor]:
"""Branches out from an autograd lane of the given tensor."""
if torch.is_grad_enabled() and input.requires_grad:
input, phony = Fork.apply(input)
else:
phony = get_phony(input.device, requires_grad=False)
return input, phony
class Fork(torch.autograd.Function):
@staticmethod
def forward(ctx: "Fork", input: Tensor) -> Tuple[Tensor, Tensor]: # type: ignore[override]
phony = get_phony(input.device, requires_grad=False)
return input.detach(), phony.detach()
@staticmethod
def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor: # type: ignore[override]
return grad_input
def join(input: Tensor, phony: Tensor) -> Tensor:
"""Merges two autograd lanes."""
if torch.is_grad_enabled() and (input.requires_grad or phony.requires_grad):
input = Join.apply(input, phony)
return input
class Join(torch.autograd.Function):
@staticmethod
def forward(ctx: "Join", input: Tensor, phony: Tensor) -> Tensor: # type: ignore[override]
return input.detach()
@staticmethod
def backward(ctx: "Join", grad_input: Tensor) -> Tuple[Tensor, None]: # type: ignore[override]
return grad_input, None
| pytorch-master | torch/distributed/pipeline/sync/dependency.py |
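A tiny sketch of ``fork`` and ``join``: the phony tensor carries no data, it only threads an ordering edge through the autograd graph (the toy tensors are illustrative):

import torch
from torch.distributed.pipeline.sync.dependency import fork, join

a = torch.randn(3, requires_grad=True)
b = torch.randn(3, requires_grad=True)

a2, phony = fork(a * 2)   # branch a phony off a's autograd lane
b3 = join(b * 3, phony)   # b's lane now depends on a's lane during backward
(a2.sum() + b3.sum()).backward()
assert a.grad is not None and b.grad is not None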
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""The Pipe interface."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Union, Sequence, Tuple, cast
import torch
from torch import Tensor, nn
from torch.distributed.rpc import RRef
import torch.autograd
import torch.cuda
from . import microbatch
from .batchnorm import DeferredBatchNorm
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
__all__ = ["Pipe"]
Device = Union[torch.device, int, str]
Devices = Union[Iterable[Device], List[Device]]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
if TYPE_CHECKING:
# Typechecking: nn.Module is not a Generic
Module = nn.Module[TensorOrTensors] # type: ignore[type-arg]
NamedModules = OrderedDict[str, Module]
else:
Module = nn.Module
NamedModules = OrderedDict
def _recommend_auto_balance(message: str) -> str:
"""Expands a message with recommendation to :mod:`torchpipe.balance`."""
return f"""{message}
If your model is still under development, its optimal balance would change
frequently. In this case, we highly recommend 'torch.distributed.pipeline.sync.balance' for
naive automatic balancing:
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.balance import balance_by_time
partitions = torch.cuda.device_count()
sample = torch.empty(...)
balance = balance_by_time(partitions, model, sample)
model = Pipe(model, balance, ...)
"""
def _verify_module(module: nn.Sequential) -> None:
if not isinstance(module, nn.Sequential):
raise TypeError("module must be nn.Sequential to be partitioned")
named_children = list(module.named_children())
if len(named_children) != len(module):
raise ValueError("module with duplicate children is not supported")
def _verify_splitting(
module: nn.Sequential, partitions: List[nn.Sequential], devices: List[torch.device]
) -> None:
num_parameters = len(list(module.parameters()))
num_child_parameters = sum(len(list(child.parameters())) for child in module.children())
if num_parameters == num_child_parameters:
return
for i in range(len(partitions)):
for j in range(i + 1, len(partitions)):
parti = partitions[i]
partj = partitions[j]
if devices[i] == devices[j]:
continue
for p in parti.parameters():
for q in partj.parameters():
if p is q:
raise ValueError("module with duplicate parameters on distinct devices is not supported")
class BalanceError(ValueError):
pass
def _retrieve_device(module: nn.Module) -> torch.device:
"""Validates all parameters in the Module have the same device and returns
the appropriate device.
Args:
module: An ``nn.Module`` to process.
Returns:
``torch.device`` for the entire module.
Raises:
ValueError:
If devices for ``nn.Module`` parameters are not all the same.
"""
device = None
for parameter in module.parameters():
if device is None:
device = parameter.device
elif device != parameter.device:
raise ValueError(
'nn.Module: {}, should have all parameters on a single device,'
' please use .to() to place the module on a single device'.format(module))
return device if device is not None else torch.device("cpu")
class PipeSequential(nn.Sequential):
"""
Pipe variant of ``nn.Sequential`` which supports multiple inputs.
"""
def forward(self, *inputs):
for module in self:
if isinstance(inputs, Tuple): # type: ignore[arg-type]
inputs = module(*inputs)
else:
# Don't expand single variables (ex: lists/Tensor)
inputs = module(inputs)
return inputs
class WithDevice(nn.Module):
"""
Wraps an ``nn.Module`` which is part of ``nn.Sequential`` passed into :class:`Pipe`
that overrides the device for that module. In cases where :class:`Pipe`
can't implicitly determine the device for the module and places it on CPU,
this wrapper can be used to override the implicit behavior and explicitly
specify which device a module should run on.
The provided module is also moved to the given device via ``.to(device)``
by :class:`Pipe`
Args:
module(:class:`torch.nn.Module`): The module to be wrapped.
device(:class:`torch.device`): The device to run the module on.
Example::
>>> fc1 = nn.Linear(16, 8).cuda(0)
>>> fc2 = nn.Linear(8, 4).cuda(1)
>>> dropout = nn.Dropout()
>>>
>>> # Dropout does not have any parameters/buffers, but we want to
>>> # run it on cuda:1 to avoid any GPU to CPU transfers.
>>> model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
>>> # xdoctest: +SKIP
>>> model = Pipe(model, chunks=8)
"""
def __init__(self, module: nn.Module, device: torch.device):
super(WithDevice, self).__init__()
self._module = module
self._device = torch.device(device)
def forward(self, *args, **kwargs):
return self._module(*args, **kwargs)
@property
def module(self):
return self._module
@property
def device(self):
return self._device
def _assemble_partition(modules: List[nn.Module]):
modules_list: List[nn.Module] = []
for module in modules:
if isinstance(module, nn.Sequential):
modules_list.extend(module.children())
else:
modules_list.append(module)
return PipeSequential(*modules_list)
def _split_module(modules: nn.Sequential) -> Tuple[List[nn.Sequential], List[torch.device]]:
partitions = []
devices = []
current_partition = []
current_device = None
for name, module in modules.named_children():
if isinstance(module, WithDevice):
# Process device override and move module to appropriate device.
device = module.device
module = module.module
module.to(device)
else:
device = _retrieve_device(module)
if current_device is not None and (current_device != device or device.type == 'cpu'):
partitions.append(_assemble_partition(current_partition))
devices.append(current_device)
current_partition = []
current_device = device
current_partition.append(module)
if current_device is not None:
partitions.append(_assemble_partition(current_partition))
devices.append(current_device)
partitions = cast(List[nn.Sequential], nn.ModuleList(partitions))
return partitions, devices
MOVING_DENIED = TypeError("denied to move parameters and buffers, " "because Pipe should manage device placement")
class Pipe(Module):
"""Wraps an arbitrary :class:`nn.Sequential <torch.nn.Sequential>` module
to train on using synchronous pipeline parallelism. If the module requires
lots of memory and doesn't fit on a single GPU, pipeline parallelism is a
useful technique to employ for training.
The implementation is based on the torchgpipe_ paper.
.. _torchgpipe: https://arxiv.org/abs/2004.09910
Pipe combines pipeline parallelism with checkpointing to reduce peak
memory required to train while minimizing device under-utilization.
You should place all the modules on the appropriate devices and wrap them
into an :class:`nn.Sequential <torch.nn.Sequential>` module defining the
desired order of execution. If a module does not contain any
parameters/buffers, it is assumed this module should be executed on CPU
and appropriate input tensors to the module are moved to CPU before
execution. This behavior can be overridden by the :class:`WithDevice`
wrapper which can be used to explicitly specify which device a module
should run on.
Args:
module (:class:`nn.Sequential <torch.nn.Sequential>`):
sequential module to be parallelized using pipelining. Each module
in the sequence has to have all of its parameters on a single
device. Each module in the sequence has to either be an nn.Module
or :class:`nn.Sequential <torch.nn.Sequential>` (to combine multiple
sequential modules on a single device)
chunks (int):
number of micro-batches (default: ``1``)
checkpoint (str):
when to enable checkpointing, one of ``'always'``,
``'except_last'``, or ``'never'`` (default: ``'except_last'``).
``'never'`` disables checkpointing completely, ``'except_last'``
enables checkpointing for all micro-batches except the last one
and ``'always'`` enables checkpointing for all micro-batches.
deferred_batch_norm (bool):
whether to use deferred ``BatchNorm`` moving statistics (default:
:data:`False`). If set to :data:`True`, we track statistics across
multiple micro-batches to update the running statistics per
mini-batch.
Raises:
TypeError:
the module is not a :class:`nn.Sequential <torch.nn.Sequential>`.
ValueError:
invalid arguments
Example::
Pipeline of two FC layers across GPUs 0 and 1.
>>> # Need to initialize RPC framework first.
>>> # xdoctest: +SKIP
>>> os.environ['MASTER_ADDR'] = 'localhost'
>>> os.environ['MASTER_PORT'] = '29500'
>>> torch.distributed.rpc.init_rpc('worker', rank=0, world_size=1)
>>>
>>> # Build pipe.
>>> fc1 = nn.Linear(16, 8).cuda(0)
>>> fc2 = nn.Linear(8, 4).cuda(1)
>>> model = nn.Sequential(fc1, fc2)
>>> model = Pipe(model, chunks=8)
>>> input = torch.rand(16, 16).cuda(0)
>>> output_rref = model(input)
.. note::
You can wrap a :class:`Pipe` model with
:class:`torch.nn.parallel.DistributedDataParallel` only when the
checkpoint parameter of :class:`Pipe` is ``'never'``.
.. note::
:class:`Pipe` only supports intra-node pipelining currently, but
will be expanded to support inter-node pipelining in the future.
The forward function returns an :class:`~torch.distributed.rpc.RRef`
to allow for inter-node pipelining in the future, where the output
might be on a remote host. For intra-node pipelining you can use
:meth:`~torch.distributed.rpc.RRef.local_value` to retrieve the
output locally.
.. warning::
:class:`Pipe` is experimental and subject to change.
"""
def __init__(
self,
module: nn.Sequential,
chunks: int = 1,
checkpoint: str = "except_last",
deferred_batch_norm: bool = False,
) -> None:
super().__init__()
# Check if RPC framework is initialized.
if not torch.distributed.rpc._is_current_rpc_agent_set():
raise RuntimeError(
'Please initialize RPC framework for Pipe using '
'torch.distributed.rpc.init_rpc')
chunks = int(chunks)
checkpoint = str(checkpoint)
if chunks <= 0:
raise ValueError("number of chunks must be positive integer")
if checkpoint not in ["always", "except_last", "never"]:
raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'")
_verify_module(module)
# Verify if the underlying skippable modules satisfy integrity. The
# integrity can be verified before forward() because it is static.
verify_skippables(module)
self.chunks = chunks
self.checkpoint = checkpoint
if deferred_batch_norm:
module = DeferredBatchNorm.convert_deferred_batch_norm(module, chunks)
self.partitions, self.devices = _split_module(module)
_verify_splitting(module, self.partitions, self.devices)
self._copy_streams: List[List[AbstractStream]] = []
self._skip_layout = inspect_skip_layout(self.partitions)
# Separate CUDA streams for copy.
copy_streams = self._ensure_copy_streams()
# The micro-batch index where the checkpointing stops.
checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[self.checkpoint]
self.pipeline = Pipeline(self.partitions, self.devices, copy_streams, self._skip_layout, checkpoint_stop)
def __len__(self) -> int:
"""Counts the length of the underlying sequential module."""
return sum(len(p) for p in self.partitions)
def __getitem__(self, index: int) -> nn.Module:
"""Gets a layer in the underlying sequential module."""
partitions = self.partitions
if index < 0:
partitions = partitions[::-1]
for partition in partitions:
try:
return partition[index]
except IndexError:
pass
shift = len(partition)
if index < 0:
index += shift
else:
index -= shift
raise IndexError
def __iter__(self) -> Iterable[nn.Module]:
"""Iterates over children of the underlying sequential module."""
for partition in self.partitions:
yield from partition
# Pipe should manage the device of each partition.
# Deny cuda(), cpu(), and to() with device, by TypeError.
def cuda(self, device: Optional[Device] = None) -> "Pipe":
raise MOVING_DENIED
def cpu(self) -> "Pipe":
raise MOVING_DENIED
def to(self, *args: Any, **kwargs: Any) -> "Pipe":
# Deny these usages:
#
# - to(device[, dtype, non_blocking])
# - to(tensor[, non_blocking])
#
# But allow this:
#
# - to(dtype[, non_blocking])
#
if "device" in kwargs or "tensor" in kwargs:
raise MOVING_DENIED
if args:
if isinstance(args[0], (torch.device, int, str)):
raise MOVING_DENIED
if torch.is_tensor(args[0]):
raise MOVING_DENIED
return super().to(*args, **kwargs)
def _ensure_copy_streams(self) -> List[List[AbstractStream]]:
"""Ensures that :class:`Pipe` caches CUDA streams for copy.
It is worth caching CUDA streams even though PyTorch already manages a
pool of pre-allocated CUDA streams, because doing so may reduce GPU memory
fragmentation when the number of micro-batches is small.
"""
if not self._copy_streams:
for device in self.devices:
self._copy_streams.append([new_stream(device) for _ in range(self.chunks)])
return self._copy_streams
def forward(self, *inputs) -> RRef:
"""
Processes a single input mini-batch through the pipe and returns an
:class:`~torch.distributed.rpc.RRef` pointing to the output.
:class:`Pipe` is a fairly transparent module wrapper. It doesn't
modify the input and output signature of the underlying module. But
there is a type restriction: input and output have to contain at least one
tensor. This restriction is applied at partition boundaries too.
The sequence of inputs is fed into the first stage of the pipeline as
``*inputs``. As a result the positional args for this function should
match the positional args for the first stage of the pipeline. The same
condition applies for output of one stage of the pipeline which is the
input for the next stage.
The input tensor is split into multiple micro-batches based on the
``chunks`` parameter used to initialize :class:`Pipe`. The batch size
is assumed to be the first dimension of the tensor and if the batch
size is less than ``chunks``, the number of micro-batches is equal to
the batch size.
Only tensors are split into multiple micro-batches, non-Tensor inputs
are just replicated as-is in each micro-batch. For non-Tensor outputs
in the last stage of the pipeline, they are aggregated as a ``List``
and returned to the user. For example, if you have 2 micro-batches
returning the integer 5, the user would receive the consolidated
output of `[5, 5]`
All the input tensors need to be on the same device as the first
partition of the pipeline.
If a tensor is wrapped with the :class:`NoChunk` wrapper, the tensor
is not split across micro-batches and is replicated as-is similar to
non-tensors.
Args:
inputs: input mini-batch
Returns:
:class:`~torch.distributed.rpc.RRef` to the output of the mini-batch
Raises:
TypeError: input doesn't contain at least one tensor
"""
first_partition_device = self.devices[0] if len(self.devices) != 0 else torch.device("cpu")
microbatch.check(first_partition_device, *inputs)
if not self.devices:
# Empty sequential module is not illegal.
return RRef(*inputs)
# Divide a mini-batch into micro-batches.
batches = microbatch.scatter(*inputs, chunks=self.chunks)
# Run pipeline parallelism.
self.pipeline.run(batches)
# Merge the micro-batches into one mini-batch.
output = microbatch.gather(batches)
return RRef(output)
| pytorch-master | torch/distributed/pipeline/sync/pipe.py |
# -*- coding: utf-8 -*-
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Portal keeps a tensor in the pocket plane. The tensor becomes hidden to the
autograd engine. The shared context of three functions (:class:`PortalBlue`,
:class:`PortalOrange`, and :class:`PortalCopy`) out of the computation graph is
one of the most important feature of :mod:`torchpipe.skip`.
The metaphor is inspired by Portalβ’ from Valve.
"""
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from ..copy import Context as CopyContext
from ..copy import Copy
from ..phony import get_phony
from ..stream import AbstractStream, get_device
__all__: List[str] = []
class Portal:
"""A portal for a tensor."""
def __init__(self, tensor: Optional[Tensor], tensor_life: int) -> None:
self.put_tensor(tensor, tensor_life)
self.grad: Optional[Tensor] = None
def blue(self) -> Tensor:
"""Creates a :class:`PortalBlue` which hides the underlying tensor from
the autograd engine.
Join the returning phony to the main lane of the autograd graph to
assure the correct backpropagation::
PortalBlue --+
|
---------- Join --
"""
tensor = self.use_tensor()
if tensor is None:
return get_phony(torch.device("cpu"), requires_grad=False)
return PortalBlue.apply(self, tensor)
def orange(self, phony: Tensor) -> Optional[Tensor]:
"""Creates a :class:`PortalOrange` which retrieves the hidden tensor
without losing the ability to backpropagate.
Give a phony forked from the main lane of an autograd graph::
+-- PortalOrange --+
| |
-- Fork --------- f(a, b) --
"""
self.check_tensor_life()
if self.tensor is None:
return self.use_tensor()
return PortalOrange.apply(self, phony)
def copy(self, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor,) -> Tensor:
"""Copies the hidden tensor by a :class:`PortalCopy`.
Give a phony and use the returning phony to keep backpropagation::
+-- PortalCopy --+
| |
-- Fork ---------- Join --
"""
if self.tensor is None:
return get_phony(torch.device("cpu"), requires_grad=False)
return PortalCopy.apply(self, prev_stream, next_stream, phony)
def check_tensor_life(self) -> None:
if self.tensor_life <= 0:
raise RuntimeError("tensor in portal has been removed")
def put_tensor(self, tensor: Optional[Tensor], tensor_life: int) -> None:
"""Stores a tensor into this portal."""
# [Life of Tensor through Portal]
#
# The tensor can be retrieved by use_tensor() up to 'tensor_life'
# times. When the life becomes 0, the tensor will be deleted for
# deallocation in CUDA memory.
#
# The below events participate in a tensor through a portal.
# Note that [x] denotes the events which call use_tensor():
#
# 1. [x] blue()
# 2. [ ] PortalBlue.forward
# 3. [ ] copy()
# 4. [ ] PortalCopy.forward
# 5. [ ] orange()
# 6. [x] PortalOrange.forward
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 7. [ ] orange() (recomputed)
# 8. [x] PortalOrange.forward (recomputed)
# 9. [ ] PortalOrange.backward
# 10. [ ] PortalCopy.backward
# 11. [x] blue() (recomputed)
# 12. [ ] PortalBlue.forward (recomputed)
# 13. [ ] PortalBlue.backward
#
self.tensor_life = tensor_life
if tensor_life > 0:
self.tensor = tensor
else:
self.tensor = None
def use_tensor(self) -> Optional[Tensor]:
"""Retrieves the underlying tensor and decreases the tensor life. When
the life becomes 0, the tensor will be removed.
"""
self.check_tensor_life()
tensor = self.tensor
self.tensor_life -= 1
if self.tensor_life <= 0:
self.tensor = None
return tensor
def put_grad(self, grad: Tensor) -> None:
"""Stores a gradient into this portal."""
self.grad = grad
def use_grad(self) -> Tensor:
"""Retrieves and removes the underlying gradient. The gradient is
always ephemeral.
"""
if self.grad is None:
raise RuntimeError("grad in portal has been removed or never set")
grad = self.grad
self.grad = None
return grad
# Common interface between :class:`PortalBlue`, :class:`PortalOrange`, and
# :class:`PortalCopy`.
class Context(CopyContext):
portal: Portal
class PortalBlue(torch.autograd.Function):
"""Hides a tensor from the autograd engine by a :class:`Portal`."""
@staticmethod
# type: ignore[override]
def forward(
ctx: Context,
portal: Portal,
# This tensor must be retrieved by portal.use_tensor().
tensor: Tensor,
) -> Tensor:
ctx.portal = portal
phony = get_phony(tensor.device, requires_grad=False)
return phony.detach()
@staticmethod
# type: ignore[override]
def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, Tensor]:
# The paired PortalOrange should keep the gradient.
grad = ctx.portal.use_grad()
return None, grad
class PortalOrange(torch.autograd.Function):
"""Retrieves the hidden tensor from a :class:`Portal`."""
@staticmethod
# type: ignore[override]
def forward(ctx: Context, portal: Portal, phony: Tensor) -> Tensor:
ctx.portal = portal
tensor = portal.use_tensor()
assert tensor is not None
return tensor.detach()
@staticmethod
def backward(ctx: Context, grad: Tensor) -> Tuple[None, None]: # type: ignore[override]
# The paired PortalBlue will use the gradient.
ctx.portal.put_grad(grad)
return None, None
class PortalCopy(torch.autograd.Function):
"""Copies the hidden tensor in a :class:`Portal`. It replaces the hidden
tensor with copied one.
"""
@staticmethod
# type: ignore[override]
def forward(
ctx: Context, portal: Portal, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor,
) -> Tensor:
ctx.portal = portal
assert portal.tensor is not None
(portal.tensor,) = Copy.forward(ctx, prev_stream, next_stream, portal.tensor)
phony = get_phony(get_device(next_stream), requires_grad=False)
return phony.detach()
@staticmethod
# type: ignore[override]
def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, None, None, None]:
portal = ctx.portal
assert portal.grad is not None
_, _, portal.grad = Copy.backward(ctx, portal.grad)
return None, None, None, None
| pytorch-master | torch/distributed/pipeline/sync/skip/portal.py |
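A minimal sketch of the tensor-life bookkeeping described in ``put_tensor``; using ``Portal`` directly outside the skip tracker is purely illustrative:

import torch
from torch.distributed.pipeline.sync.skip.portal import Portal

t = torch.randn(2)
portal = Portal(t, tensor_life=2)   # the tensor may be retrieved twice

assert portal.use_tensor() is t     # life 2 -> 1
assert portal.use_tensor() is t     # life 1 -> 0, the reference is dropped
assert portal.tensor is None        # freed for deallocation
# A third use_tensor() would raise RuntimeError, since the tensor has been removed.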
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Static skip connection layout of ``@skippable`` modules."""
from typing import Dict, Iterable, List, Tuple
from torch import nn
from .namespace import Namespace
__all__: List[str] = []
class SkipLayout:
"""Represents a skip connection layout across partitions."""
# Skip routes indexed by 'ns, name': {(ns, name): (prev_j, next_j), ...}
by_ns_name: Dict[Tuple[Namespace, str], Tuple[int, int]]
# Skip routes indexed by partition number 'j': [[next_j]: [(prev_j, ns, name), ...], ...]
by_partition: List[List[Tuple[int, Namespace, str]]]
def __init__(self, num_partitions: int, skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]],) -> None:
# The skip routes are already indexed by 'ns, name'.
self.by_ns_name = skip_routes
# Index skip routes by partition number 'j'.
self.by_partition = [[] for _ in range(num_partitions)]
for (ns, name), (prev_j, next_j) in skip_routes.items():
self.by_partition[next_j].append((prev_j, ns, name))
for p in self.by_partition:
p.sort()
def copy_policy(self, next_j: int) -> Iterable[Tuple[int, Namespace, str]]:
"""Generates skip routes for the given destination partition number.
The skip routes are sorted by source partition number in ascending
order.
Yields:
Each tuple of (source partition number, namespace, name).
"""
for prev_j, ns, name in self.by_partition[next_j]:
if prev_j == next_j:
# This skip tensor will be popped at the same partition where
# it is stashed. In this case, copy is not required.
continue
yield (prev_j, ns, name)
def requires_copy(self, ns: Namespace, name: str) -> bool:
"""Whether the given namespace and name requires partition-to-partition
copy or not.
"""
prev_j, next_j = self.by_ns_name.get((ns, name), (-1, -1))
return prev_j != next_j
def inspect_skip_layout(partitions: List[nn.Sequential]) -> SkipLayout:
"""Inspects the skip connection layout in the given partitions."""
# NOTE(sublee): Hide circular import inside this subroutine. Circular
# import is not ideal but placing this logic near to SkipLayout may
# increase cohesion of code.
from .skippable import Skippable
skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]] = {}
stashed_at: Dict[Tuple[Namespace, str], int] = {}
for j, partition in enumerate(partitions):
def inspect_layer(layer):
if not isinstance(layer, Skippable):
return
for ns, name in layer.stashable():
stashed_at[(ns, name)] = j
for ns, name in layer.poppable():
prev_j = stashed_at.pop((ns, name))
skip_routes[(ns, name)] = (prev_j, j)
if isinstance(partition, nn.Sequential):
for layer in partition:
inspect_layer(layer)
else:
inspect_layer(partition)
return SkipLayout(len(partitions), skip_routes)
| pytorch-master | torch/distributed/pipeline/sync/skip/layout.py |
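A short sketch of how ``SkipLayout`` answers copy questions for a single skip route; the namespace and the (0, 2) route are illustrative:

from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.namespace import Namespace

ns = Namespace()
# "1to3" is stashed in partition 0 and popped in partition 2.
layout = SkipLayout(num_partitions=3, skip_routes={(ns, "1to3"): (0, 2)})

assert layout.requires_copy(ns, "1to3")
assert list(layout.copy_policy(2)) == [(0, ns, "1to3")]   # copy from 0 into 2
assert list(layout.copy_policy(1)) == []                  # nothing flows into 1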
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Supports efficiency with skip connections."""
from .namespace import Namespace
from .skippable import pop, skippable, stash, verify_skippables
__all__ = ["skippable", "stash", "pop", "verify_skippables", "Namespace"]
| pytorch-master | torch/distributed/pipeline/sync/skip/__init__.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Tracks skip tensors on a thread."""
from contextlib import contextmanager
import threading
from typing import Dict, Generator, List, Optional, Tuple
from torch import Tensor
from ..checkpoint import is_checkpointing
from ..dependency import fork, join
from ..microbatch import Batch
from ..stream import AbstractStream
from .layout import SkipLayout
from .namespace import Namespace
from .portal import Portal
__all__: List[str] = []
class SkipTracker:
"""Tracks saved skip tensors.
It will update the given micro-batch in place. This is because when it
manipulates the underlying skip tensors, the current micro-batch also has
to be connected with the skip tensors.
One thread has one skip tracker. Call :func:`current_skip_tracker` to get
the skip tracker on the current thread.
"""
def __init__(self) -> None:
self.tensors: Dict[Tuple[Namespace, str], Optional[Tensor]] = {}
def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None:
self.tensors[(ns, name)] = tensor
def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]:
return self.tensors.pop((ns, name))
def copy(
self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str,
) -> None:
raise TypeError("copy is not supported for non-portal skip tensors")
class SkipTrackerThroughPotals(SkipTracker):
"""Tracks saved skip tensors through portals. The skip tensors will be
hidden in portals so that the autograd engine does not need to track them.
This tracker is only used when the training or evaluating module is wrapped
with :class:`torchpipe.Pipe`.
"""
def __init__(self, skip_layout: SkipLayout) -> None:
super().__init__()
self.skip_layout = skip_layout
self.portals: Dict[Tuple[Namespace, str], Portal] = {}
def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None:
"""Saves the stashed skip tensor in a portal. The portal is then
connected to the given micro-batch with :class:`Join`.
"""
if not self.skip_layout.requires_copy(ns, name):
super().save(batch, ns, name, tensor)
return
# See [Life of Tensor through Portal] at Portal.put_tensor() to understand the
# below tensor_life values. Here are the selected events which retrieve
# the tensor in portal:
#
# 1. [x] blue()
# ...
# 6. [x] PortalOrange.forward
# ...
# 8. [x] PortalOrange.forward (recomputed)
# ...
# 11. [x] blue() (recomputed)
#
if (ns, name) not in self.portals:
if is_checkpointing():
# Under checkpointing, the tensor used by the first
# PortalOrange should be alive in the portal. This tensor will
# be used again by the second PortalOrange during the
# recomputation.
tensor_life = 3 # Delete at [8. PortalOrange.forward (recomputed)]
else:
tensor_life = 2 # Delete at [6. PortalOrange.forward]
portal = Portal(tensor, tensor_life)
self.portals[(ns, name)] = portal
else:
# Under recomputation, the portal already exists.
portal = self.portals[(ns, name)]
# The existing tensor life already became 0. It should be reset as
# 1 to delete the tensor after the second PortalBlue immediately.
tensor_life = 1 # Delete at [11. blue() (recomputed)]
portal.put_tensor(tensor, tensor_life)
phony = portal.blue()
tensor_idx = batch.find_tensor_idx()
batch[tensor_idx] = join(batch[tensor_idx], phony)
def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]:
"""Loads a skip tensor from the corresponding portal to pop. The given
micro-batch is connected to the portal with :class:`Fork`.
"""
if not self.skip_layout.requires_copy(ns, name):
tensor = super().load(batch, ns, name)
return tensor
portal = self.portals[(ns, name)]
tensor_idx = batch.find_tensor_idx()
batch[tensor_idx], phony = fork(batch[tensor_idx])
tensor = portal.orange(phony)
return tensor
def copy(
self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str,
) -> None:
"""Copies the skip tensor in the corresponding portal. The given
micro-batch and the portal will be tied with :class:`Fork` and
:class:`Join`.
"""
assert self.skip_layout.requires_copy(ns, name)
tensor_idx = batch.find_tensor_idx()
batch[tensor_idx], phony = fork(batch[tensor_idx])
portal = self.portals[(ns, name)]
phony = portal.copy(prev_stream, next_stream, phony)
batch[tensor_idx] = join(batch[tensor_idx], phony)
class ThreadLocal(threading.local):
def __init__(self) -> None:
self.skip_tracker: Optional[SkipTracker] = None
thread_local = ThreadLocal()
@contextmanager
def use_skip_tracker(skip_tracker: SkipTracker) -> Generator[None, None, None]:
"""Registers the given skip tracker on the current thread within a
context::
with use_skip_tracker(my_skip_tracker):
...
"""
orig = thread_local.skip_tracker
thread_local.skip_tracker = skip_tracker
try:
yield
finally:
thread_local.skip_tracker = orig
def current_skip_tracker() -> SkipTracker:
"""Gets the skip tracker on the current thread."""
skip_tracker = thread_local.skip_tracker
if skip_tracker is None:
skip_tracker = SkipTracker()
thread_local.skip_tracker = skip_tracker
return skip_tracker
| pytorch-master | torch/distributed/pipeline/sync/skip/tracker.py |
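A small sketch of the thread-local tracker registration; ``Pipe`` normally installs the tracker inside its worker threads, so driving it by hand is illustrative only:

from torch.distributed.pipeline.sync.skip.tracker import (
    SkipTracker, current_skip_tracker, use_skip_tracker)

mine = SkipTracker()
with use_skip_tracker(mine):
    assert current_skip_tracker() is mine   # this thread now uses `mine`
# Outside the context the thread falls back to its previous or default tracker.
assert current_skip_tracker() is not mine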
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Provides isolated namespace of skip tensors."""
import abc
from functools import total_ordering
from typing import Any
import uuid
__all__ = ["Namespace"]
@total_ordering
class Namespace(metaclass=abc.ABCMeta):
"""Namespace for isolating skip tensors used by :meth:`isolate()
<torchpipe.skip.skippable.Skippable.isolate>`.
"""
__slots__ = ("id",)
def __init__(self) -> None:
self.id = uuid.uuid4()
def __repr__(self) -> str:
return f"<Namespace '{self.id}'>"
def __hash__(self) -> int:
return hash(self.id)
# Namespaces should support ordering, since SkipLayout will sort tuples
# including a namespace. But actual order between namespaces is not
# important. That's why they are ordered by version 4 UUID which generates
# random numbers.
def __lt__(self, other: Any) -> bool:
if isinstance(other, Namespace):
return self.id < other.id
return False
def __eq__(self, other: Any) -> bool:
if isinstance(other, Namespace):
return self.id == other.id
return False
# 'None' is the default namespace,
# which means that 'isinstance(None, Namespace)' is 'True'.
Namespace.register(type(None))
| pytorch-master | torch/distributed/pipeline/sync/skip/namespace.py |
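A tiny check of the ordering and equality semantics noted in the comments above (illustrative):

from torch.distributed.pipeline.sync.skip.namespace import Namespace

ns1, ns2 = Namespace(), Namespace()
assert ns1 != ns2                     # distinct random UUIDs
assert (ns1 < ns2) or (ns2 < ns1)     # a total order exists, but it is arbitrary
assert isinstance(None, Namespace)    # None is registered as the default namespace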
# -*- coding: utf-8 -*-
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""The user interface to define skip connections."""
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
FrozenSet,
Generator,
Iterable,
List,
Optional,
Set,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from torch import Tensor, nn
from ..microbatch import Batch
from .namespace import Namespace
from .tracker import current_skip_tracker
__all__ = ["skippable", "stash", "pop", "verify_skippables"]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
StashPop = Union["stash", "pop"]
StashPopGenerator = Generator[StashPop, Optional[Tensor], TensorOrTensors]
if TYPE_CHECKING:
# Typechecking: nn.Module is not a Generic
SkippableModule = nn.Module[Union[StashPopGenerator, TensorOrTensors]] # type: ignore[type-arg]
else:
SkippableModule = nn.Module
T = TypeVar("T", bound="Skippable")
class Skippable(nn.Module):
"""The base class for skippable modules.
Do not use this class directly. Define a subclass by :func:`skippable`
instead.
"""
module_cls: ClassVar[Type[SkippableModule]]
stashable_names: ClassVar[FrozenSet[str]]
poppable_names: ClassVar[FrozenSet[str]]
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__()
self.module = self.module_cls(*args, **kwargs) # type: ignore[call-arg]
self.namespaces: Dict[str, Namespace] = {}
def __repr__(self) -> str:
return f"@skippable({self.module})"
def namespaced(self, name: str) -> Tuple[Namespace, str]:
"""Prepends namespace for the given skip name."""
ns = self.namespaces.get(name)
ns = cast(Namespace, ns)
return (ns, name)
def stashable(self) -> Iterable[Tuple[Namespace, str]]:
"""Iterates over namespaced skip names to be stashed."""
for name in self.stashable_names:
yield self.namespaced(name)
def poppable(self) -> Iterable[Tuple[Namespace, str]]:
"""Iterates over namespaced skip names to be popped."""
for name in self.poppable_names:
yield self.namespaced(name)
def isolate(self: T, ns: Namespace, *, only: Optional[Iterable[str]] = None) -> T:
r"""Isolates a specified subset or the whole set of skip tensors into a
namespace. In a single sequential module, skip tensors with the same
name are not allowed unless they are isolated by different namespaces.
Here's an example using the same name for skip tensors twice. Each pair
of ``Layer1`` and ``Layer2`` is isolated with its own namespace ``ns1``
and ``ns2``. There is no conflict anymore::
ns1 = Namespace()
ns2 = Namespace()
model = nn.Sequential(
Layer1().isolate(ns1),
Layer1().isolate(ns2),
Layer2(),
Layer3().isolate(ns2),
Layer3().isolate(ns1),
)
When `only` parameter is omitted, all skip tensors are isolated. You
can isolate a subset of skip tensors by passing `only` parameter::
ns_alice = Namespace()
ns_bob = Namespace()
model = nn.Sequential(
...
StashStashPop().isolate(ns_alice, only=['alice']) \
.isolate(ns_bob, only=['bob']),
...
)
Args:
ns (Namespace):
namespace for isolation
Keyword Args:
only (iterable of strs):
names of specific skip tensors to be isolated (omit this option
to isolate all skip tensors declared in this module)
Returns:
this module itself
"""
names: Iterable[str]
if only is None:
names = self.stashable_names | self.poppable_names
else:
names = set(only)
for name in names:
self.namespaces[name] = ns
return self
def dispatch(
self,
input,
handle_stash: Callable[[str, Optional[Tensor]], None],
handle_pop: Callable[[str], Optional[Tensor]],
):
"""Dispatches :class:`stash` or :class:`pop` commands generated by the
module's ``forward()``.
"""
generator = self.module(input)
if not isinstance(generator, Generator):
# The underlying module returned output without any yield.
output = generator
return output
try:
op = next(generator)
while True:
if isinstance(op, stash):
handle_stash(op.name, op.tensor)
op = next(generator)
continue
if isinstance(op, pop):
tensor = handle_pop(op.name)
op = generator.send(tensor)
continue
raise TypeError("%r is not a command from @skippable" % op)
except StopIteration as stop:
output = stop.args[0]
return output
def forward(self, input: Union[List[Any], Tensor]) -> TensorOrTensors:
"""Performs the forward propagation. :class:`stash` or :class:`pop`
commands will be handled by portals silently. The portals won't be
exposed to users.
Raises:
RuntimeError:
illegal 'stash' or 'pop' is found.
"""
skip_tracker = current_skip_tracker()
stashed_tensors: Dict[str, Optional[Tensor]] = {}
# Load skip tensors that might be popped.
poppable_tensors = {}
batch = Batch(input)
for ns, name in self.poppable():
try:
poppable_tensors[name] = skip_tracker.load(batch, ns, name)
except KeyError:
raise RuntimeError(f"'{name}' has not been stashed")
input = batch.values
# Handle skip commands.
def handle_stash(name: str, tensor: Optional[Tensor]) -> None:
if name not in self.stashable_names:
raise RuntimeError(f"'{name}' has not been declared as stashable")
stashed_tensors[name] = tensor
def handle_pop(name: str) -> Optional[Tensor]:
if name not in self.poppable_names:
raise RuntimeError(f"'{name}' has not been declared as poppable")
return poppable_tensors.pop(name)
output = self.dispatch(input, handle_stash, handle_pop)
# All declared skips must be stashed or popped.
not_stashed = self.stashable_names - stashed_tensors.keys()
if not_stashed:
comma_names = ", ".join("'%s'" % n for n in not_stashed)
raise RuntimeError(f"{comma_names} must be stashed but have not")
not_popped = poppable_tensors.keys()
if not_popped:
comma_names = ", ".join("'%s'" % n for n in not_popped)
raise RuntimeError(f"{comma_names} must be popped but have not")
# Save stashed skip tensors.
batch = Batch(output)
for ns, name in self.stashable():
tensor = stashed_tensors[name]
skip_tracker.save(batch, ns, name, tensor)
output = batch.values
return output
# TODO(sublee): Move to above of Skippable class for better read flow.
def skippable(
stash: Iterable[str] = (), pop: Iterable[str] = (),
) -> Callable[[Type[SkippableModule]], Type[Skippable]]:
"""The decorator to define a :class:`nn.Module <torch.nn.Module>` with skip
connections. Decorated modules are called "skippable". This functionality
works perfectly fine even when the module is not wrapped by
:class:`~torch.distributed.pipeline.sync.Pipe`.
Each skip tensor is managed by its name. Before manipulating skip tensors,
a skippable module must statically declare the names for skip tensors by
`stash` and/or `pop` parameters. Skip tensors with pre-declared name can be
stashed by ``yield stash(name, tensor)`` or popped by ``tensor = yield
pop(name)``.
Here is an example with three layers. A skip tensor named "1to3" is stashed
and popped at the first and last layer, respectively::
@skippable(stash=['1to3'])
class Layer1(nn.Module):
def forward(self, input):
yield stash('1to3', input)
return f1(input)
class Layer2(nn.Module):
def forward(self, input):
return f2(input)
@skippable(pop=['1to3'])
class Layer3(nn.Module):
def forward(self, input):
skip_1to3 = yield pop('1to3')
return f3(input) + skip_1to3
model = nn.Sequential(Layer1(), Layer2(), Layer3())
One skippable module can stash or pop multiple skip tensors::
@skippable(stash=['alice', 'bob'], pop=['carol'])
class StashStashPop(nn.Module):
def forward(self, input):
yield stash('alice', f_alice(input))
yield stash('bob', f_bob(input))
carol = yield pop('carol')
return input + carol
Every skip tensor must be associated with exactly one pair of `stash` and
`pop`. :class:`~torch.distributed.pipeline.sync.Pipe` checks this
restriction automatically when wrapping a module. You can also check the
restriction by :func:`verify_skippables`
without :class:`~torch.distributed.pipeline.sync.Pipe`.
"""
stashable_names = frozenset(stash)
poppable_names = frozenset(pop)
def extend_skippable(module_cls: Type[SkippableModule]) -> Type[Skippable]:
name = module_cls.__name__
bases = (Skippable,)
attrs = {"module_cls": module_cls, "stashable_names": stashable_names, "poppable_names": poppable_names}
return type(name, bases, attrs)
return extend_skippable
class stash:
"""The command to stash a skip tensor.
::
def forward(self, input):
yield stash('name', input)
return f(input)
Args:
name (str): name of skip tensor
input (torch.Tensor or None): tensor to pass to the skip connection
"""
__slots__ = ("name", "tensor")
def __init__(self, name: str, tensor: Optional[Tensor]) -> None:
self.name = name
self.tensor = tensor
class pop:
"""The command to pop a skip tensor.
::
def forward(self, input):
skip = yield pop('name')
return f(input) + skip
Args:
name (str): name of skip tensor
Returns:
the skip tensor previously stashed by another layer under the same name
"""
__slots__ = ("name",)
def __init__(self, name: str) -> None:
self.name = name
def verify_skippables(module: nn.Sequential) -> None:
"""Verifies if the underlying skippable modules satisfy integrity.
Every skip tensor must have only one pair of `stash` and `pop`. If there
are one or more unmatched pairs, it will raise :exc:`TypeError` with the
detailed messages.
Here are a few failure cases. :func:`verify_skippables` will report failure
for these cases::
# Layer1 stashes "1to3".
# Layer3 pops "1to3".
nn.Sequential(Layer1(), Layer2())
#               └──── ?
nn.Sequential(Layer2(), Layer3())
#                   ? ────┘
nn.Sequential(Layer1(), Layer2(), Layer3(), Layer3())
#               └───────────────────┘       ^^^^^^
nn.Sequential(Layer1(), Layer1(), Layer2(), Layer3())
#             ^^^^^^      └───────────────────┘
To use the same name for multiple skip tensors, they must be isolated by
different namespaces. See :meth:`isolate()
<torch.distributed.pipeline.sync.skip.skippable.Skippable.isolate>`.
Raises:
TypeError:
one or more pairs of `stash` and `pop` are not matched.
"""
stashed: Set[Tuple[Namespace, str]] = set()
popped: Set[Tuple[Namespace, str]] = set()
msgs: List[str] = []
for layer_name, layer in module.named_children():
if not isinstance(layer, Skippable):
continue
for name in layer.stashable_names & layer.poppable_names:
msg = f"'{layer_name}' declared '{name}' both as stashable and as poppable"
msgs.append(msg)
for ns, name in layer.stashable():
if name in layer.poppable_names:
continue
if (ns, name) in stashed:
msg = f"'{layer_name}' redeclared '{name}' as stashable " "but not isolated by namespace"
msgs.append(msg)
continue
stashed.add((ns, name))
for ns, name in layer.poppable():
if name in layer.stashable_names:
continue
if (ns, name) in popped:
msg = f"'{layer_name}' redeclared '{name}' as poppable " "but not isolated by namespace"
msgs.append(msg)
continue
if (ns, name) not in stashed:
msg = f"'{layer_name}' declared '{name}' as poppable but it was not stashed"
msgs.append(msg)
continue
popped.add((ns, name))
for (_, name) in stashed - popped:
msg = f"no module declared '{name}' as poppable but stashed"
msgs.append(msg)
if msgs:
raise TypeError(
"one or more pairs of stash and pop do not match:\n\n%s" "" % "\n".join("* %s" % x for x in msgs)
)
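# A minimal usage sketch for ``verify_skippables``. ``_example_verify_skippables``
# and ``StashOnly`` are illustrative names only, not part of the public API: the
# layer stashes "skip" but nothing pops it, so verification raises TypeError.
def _example_verify_skippables() -> str:
    @skippable(stash=["skip"])
    class StashOnly(nn.Module):
        def forward(self, input):
            yield stash("skip", input)
            return input

    try:
        verify_skippables(nn.Sequential(StashOnly()))
    except TypeError as exc:
        # e.g. "no module declared 'skip' as poppable but stashed"
        return str(exc)
    return "unexpectedly passed"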
| pytorch-master | torch/distributed/pipeline/sync/skip/skippable.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Per-layer profilers."""
import copy
import time
from typing import Any, Generator, List, Union, Sequence
import torch
from torch import Tensor
import torch.nn as nn
from ..microbatch import Batch
__all__: List[str] = []
Device = Union[torch.device, int, str]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
def layerwise_sandbox(module: nn.Sequential, device: torch.device,) -> Generator[nn.Module, None, None]:
Copies layers so they can be profiled in isolation. It doesn't modify the given
module.
"""
for layer in module:
layer_copy = copy.deepcopy(layer)
layer_copy.to(device)
layer_copy.train()
yield layer_copy
def detach(batch: Batch) -> None:
"""Detaches from autograd graph."""
for i, x in enumerate(batch):
batch[i] = x.detach().requires_grad_(x.requires_grad)
def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]:
"""Profiles elapsed times per layer."""
if any(p.grad is not None for p in module.parameters()):
raise ValueError("some parameter already has gradient")
_batch = Batch(sample)
for i, x in enumerate(_batch):
_batch[i] = x.detach().to(device).requires_grad_(x.requires_grad)
time_bufs: List[List[float]] = [[] for _ in module]
begun_at = time.time()
while time.time() - begun_at < timeout:
batch = _batch
for i, layer in enumerate(layerwise_sandbox(module, device)):
detach(batch)
if device.type == "cuda":
torch.cuda.synchronize(device)
tick = time.time()
# Forward
batch = batch.call(layer)
# Backward
backward_tensors = tuple(y for y in batch if y.requires_grad)
if backward_tensors:
torch.autograd.backward(backward_tensors, backward_tensors)
if device.type == "cuda":
torch.cuda.synchronize(device)
tock = time.time()
time_bufs[i].append(tock - tick)
us = 1_000_000
return [sum(int(t * us) for t in buf) for buf in time_bufs]
def profile_sizes(
module: nn.Sequential, input: Union[List[Any], Tensor], chunks: int, param_scale: float, device: torch.device,
) -> List[int]:
"""Profiles CUDA memory usage per layer."""
if device.type != "cuda":
raise ValueError("size profiler supports only CUDA device")
batch = Batch(input)
sizes: List[int] = []
latent_scale = batch[0].size(0) / chunks
for i, x in enumerate(batch):
batch[i] = x[:1].detach().to(device).requires_grad_(x.requires_grad)
for layer in layerwise_sandbox(module, device):
detach(batch)
# Detect memory usage at forward.
memory_before = torch.cuda.memory_allocated(device)
batch = batch.call(layer)
memory_after = torch.cuda.memory_allocated(device)
latent_size = memory_after - memory_before
# Analyze size of parameters.
param_size = sum(p.storage().nbytes() for p in layer.parameters())
# Combine the size of parameters and activations with normalized scales.
size = latent_size * latent_scale + param_size * param_scale
sizes.append(int(size))
return sizes
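# A small usage sketch for ``profile_times``. ``_example_profile_times`` and the
# toy model below are illustrative only: each layer is profiled on CPU for about
# 0.1 seconds and the accumulated per-layer time is returned in microseconds.
def _example_profile_times() -> List[int]:
    model = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
    sample = torch.randn(4, 8, requires_grad=True)
    return profile_times(model, sample, timeout=0.1, device=torch.device("cpu"))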
| pytorch-master | torch/distributed/pipeline/sync/_balance/profile.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""A helper to roughly balance a sequential module.
Usage::
import torch
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.balance import balance_by_time
sample = torch.empty(128, 3, 224, 224)
balance = balance_by_time(torch.cuda.device_count(), model, sample)
pipe = Pipe(model, balance, chunks=8)
"""
from typing import Any, List, Union, Sequence
import torch
from torch import Tensor
import torch.nn as nn
from . import blockpartition
from .profile import profile_sizes, profile_times
__all__ = ["balance_by_time", "balance_by_size"]
Device = Union[torch.device, int, str]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
def balance_cost(cost: List[int], partitions: int) -> List[int]:
partitioned = blockpartition.solve(cost, partitions)
return [len(p) for p in partitioned]
def balance_by_time(
partitions: int,
module: nn.Sequential,
sample: Union[List[Any], Tensor],
*,
timeout: float = 1.0,
device: Device = torch.device("cuda"),
) -> List[int]:
"""Naive automatic balancing by elapsed time per layer.
::
sample = torch.empty(128, 3, 224, 224)
balance = balance_by_time(torch.cuda.device_count(), model, sample)
pipe = Pipe(model, balance, chunks=8)
Args:
partitions (int):
intended number of partitions
module (torch.nn.Sequential):
sequential module to be partitioned
sample (torch.Tensor):
example input with arbitrary batch size
Keyword Args:
timeout (float):
profiling iterates again if the timeout (in seconds) is not exceeded
(default: ``1.0``)
device ('cpu' or 'cuda' device):
CPU or CUDA device where each layer is profiled (default: the
current CUDA device)
Returns:
A list of the number of layers in each partition. Use it for the `balance`
parameter of :class:`~torch.distributed.pipeline.sync.Pipe`.
.. note::
`module` and `sample` must be placed on the same device.
"""
times = profile_times(module, sample, timeout, torch.device(device))
return balance_cost(times, partitions)
def balance_by_size(
partitions: int,
module: nn.Sequential,
input: Union[List[Any], Tensor],
*,
chunks: int = 1,
param_scale: float = 2.0,
device: Device = torch.device("cuda"),
) -> List[int]:
"""Naive automatic balancing by CUDA memory usage per layer.
During training, required memory for parameters depends on which optimizer
is used. Optimizers may use buffers for each parameter to track
optimization statistics internally, such as momentum buffer in SGD.
To get more reliable size based balance, you should specify `param_scale`
with regard to your optimizer. The default `param_scale` is 2 instead of 1
due to gradient accumulation which is necessary for every optimizer.
Follow this guide to choose correct `param_scale` for typical optimizers:
========= ============= =========================================
Optimizer `param_scale` Internal State
========= ============= =========================================
SGD 2--3 (momentum_buffer)
Adam 4--5 exp_avg, exp_avg_sq, (max_exp_avg_sq)
Adadelta 4 square_avg, acc_delta
Adagrad 3 sum
RMSprop 3--5 square_avg, (momentum_buffer), (grad_avg)
========= ============= =========================================
Here's a simple example with the Adam optimizer::
balance = balance_by_size(
torch.cuda.device_count(),
model,
# Same size with mini-batch to train
torch.empty(1024, 3, 224, 224),
# Number of micro-batches to train with Pipe
chunks=8,
# 4 for Adam
param_scale=4.0,
)
pipe = Pipe(model, balance, chunks=8)
adam = Adam(pipe.parameters())
Args:
partitions (int):
intended number of partitions
module (torch.nn.Sequential):
sequential module to be partitioned
input (torch.Tensor):
example mini-batch with the same size to train
Keyword Args:
chunks (int):
number of micro-batches will be used to train (default: ``1``)
param_scale (float):
how many copies of parameters would be allocated for training. It
depends on optimizer. See the above guide. (default: ``2.0``)
device ('cuda' device):
CUDA device where each layer is profiled (default: the current CUDA
device)
Returns:
A list of the number of layers in each partition. Use it for the `balance`
parameter of :class:`~torch.distributed.pipeline.sync.Pipe`.
.. note::
`module` and `input` must be placed on the same CUDA device.
"""
sizes = profile_sizes(module, input, chunks, param_scale, torch.device(device))
return balance_cost(sizes, partitions)
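# A minimal usage sketch for ``balance_by_time`` on CPU. ``_example_balance_by_time``
# and the toy model are illustrative only; the returned list gives the number of
# layers assigned to each of the two partitions (for example ``[2, 2]``).
def _example_balance_by_time() -> List[int]:
    model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 16), nn.ReLU())
    sample = torch.randn(8, 16)
    return balance_by_time(2, model, sample, timeout=0.1, device=torch.device("cpu"))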
| pytorch-master | torch/distributed/pipeline/sync/_balance/__init__.py |
# -*- coding: utf-8 -*-
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Implements "Block Partitions of Sequences" by Imre BΓ‘rΓ‘ny et al.
Paper: https://arxiv.org/pdf/1308.2452.pdf
"""
from typing import Iterator, List, Tuple
__all__ = ["solve"]
def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
"""Splits a sequence into several partitions to minimize variance for each
partition.
The result might not be optimal. However, it can be done only in O(kn³),
where k is the number of partitions and n is the length of the sequence.
"""
if partitions < 1:
raise ValueError(f"partitions must be a positive integer ({partitions} < 1)")
n = len(sequence)
if n < partitions:
raise ValueError(f"sequence is shorter than intended partitions ({n} < {partitions})")
# Normalize the sequence in [0, 1].
minimum = min(sequence)
maximum = max(sequence) - minimum
normal_sequence: List[float]
if maximum == 0:
normal_sequence = [0 for _ in sequence]
else:
normal_sequence = [(x - minimum) / maximum for x in sequence]
splits = [n // partitions * (x + 1) for x in range(partitions - 1)] + [n]
def block_size(i: int) -> float:
start = splits[i - 1] if i > 0 else 0
stop = splits[i]
return sum(normal_sequence[start:stop])
def leaderboard() -> Iterator[Tuple[float, int]]:
return ((block_size(i), i) for i in range(partitions))
while True:
"""
(1) Fix p ∈ [k] with M(P) = bp. So Bp is a maximal block of P.
"""
# max_size: M(P)
max_size, p = max(leaderboard())
while True:
"""
(2) If M(P) ≤ m(P) + 1, then stop.
"""
# min_size: m(P)
min_size, q = min(leaderboard())
if max_size <= min_size + 1:
return [sequence[i:j] for i, j in zip([0] + splits[:-1], splits)]
"""
(3) If M(P) > m(P) + 1, then let m(P) = bq for the q ∈ [k] which is
closest to p (ties broken arbitrarily). Thus Bq is a minimal block
of P. Let Bh be the block next to Bq between Bp and Bq. (Note that
Bh is a non-empty block: if it were, then m(P) = 0 and we should
have chosen Bh instead of Bq.)
"""
if p < q:
"""
So either p < q and then h = q−1 and we define P∗ by moving
the last element from Bh = Bq−1 to Bq,
"""
h = q - 1
splits[h] -= 1
else:
"""
or q < p, and then h = q + 1 and P∗ is obtained by moving the
first element of Bh = Bq+1 to Bq.
"""
h = q + 1
splits[q] += 1
"""
Set P = P∗. If p = h, then go to (1), else go to (2).
"""
if p == h:
break
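# A short usage sketch for ``solve``; ``_example_solve`` is an illustrative name
# only. Consecutive elements are grouped so the block sums end up roughly even:
# here [1, 2, 3, 4, 5, 6] over 2 partitions yields [[1, 2, 3, 4], [5, 6]]
# (sums 10 and 11).
def _example_solve() -> List[List[int]]:
    return solve([1, 2, 3, 4, 5, 6], partitions=2)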
| pytorch-master | torch/distributed/pipeline/sync/_balance/blockpartition.py |
#!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.distributed.launcher.api import ( # noqa: F401
LaunchConfig,
elastic_launch,
launch_agent,
)
| pytorch-master | torch/distributed/launcher/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
import uuid
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic import events, metrics
from torch.distributed.elastic.agent.server.api import WorkerSpec
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from torch.distributed.elastic.multiprocessing import SignalException, Std
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint
from torch.distributed.elastic.utils.logging import get_logger
__all__ = ['LaunchConfig', 'elastic_launch', 'launch_agent']
logger = get_logger()
@dataclass
class LaunchConfig:
"""
Configuration for launching an elastic agent and its workers.
Args:
min_nodes: Minimum number of nodes that the user function will
be launched on. The elastic agent ensures that the user
function starts only once at least ``min_nodes`` nodes
have joined the rendezvous.
max_nodes: Maximum number of nodes that the user function
will be launched on.
nproc_per_node: Number of workers the elastic agent will launch
on each node to execute the user-defined function.
rdzv_backend: rdzv_backend to use in the rendezvous (zeus-adapter, etcd).
rdzv_endpoint: The endpoint of the rdzv sync. storage.
rdzv_configs: Key, value pair that specifies rendezvous specific configuration.
rdzv_timeout: Legacy argument that specifies timeout for the rendezvous. It is going
to be removed in future versions, see the note below. The default timeout is 900 seconds.
run_id: The unique run id of the job (if not passed a unique one will be
deduced from run environment - flow workflow id in flow - or auto generated).
role: User defined role of the worker (defaults to "trainer").
max_restarts: The maximum amount of restarts that elastic agent will conduct
on workers before failure.
monitor_interval: The interval in seconds that is used by the elastic_agent
as a period of monitoring workers.
start_method: The method is used by the elastic agent to start the
workers (spawn, fork, forkserver).
log_dir: base log directory where log files are written. If not set,
one is created in a tmp dir but NOT removed on exit.
redirects: configuration to redirect stdout/stderr to log files.
Pass a single ``Std`` enum to redirect all workers,
or a mapping keyed by local_rank to selectively redirect.
tee: configuration to "tee" stdout/stderr to console + log file.
metrics_cfg: configuration to initialize metrics.
..note:
`rdzv_timeout` is a legacy argument that will be removed in future.
Set the timeout via `rdzv_configs['timeout']`
"""
min_nodes: int
max_nodes: int
nproc_per_node: int
run_id: str = ""
role: str = "default_role"
rdzv_endpoint: str = ""
rdzv_backend: str = "etcd"
rdzv_configs: Dict[str, Any] = field(default_factory=dict)
rdzv_timeout: int = -1
max_restarts: int = 3
monitor_interval: float = 30
start_method: str = "spawn"
log_dir: Optional[str] = None
redirects: Union[Std, Dict[int, Std]] = Std.NONE
tee: Union[Std, Dict[int, Std]] = Std.NONE
metrics_cfg: Dict[str, str] = field(default_factory=dict)
def __post_init__(self):
default_timeout = 900
if self.rdzv_timeout != -1:
self.rdzv_configs["timeout"] = self.rdzv_timeout
elif "timeout" not in self.rdzv_configs:
self.rdzv_configs["timeout"] = default_timeout
class elastic_launch:
"""
Launches a torchelastic agent on the container that invoked the entrypoint.
1. Pass the ``entrypoint`` arguments as non-``kwargs`` (i.e. no named parameters);
``entrypoint`` can be a function or a command.
2. The return value is a map of each worker's output mapped
by their respective global rank.
Usage
::
def worker_fn(foo):
# ...
def main():
# entrypoint is a function.
outputs = elastic_launch(LaunchConfig, worker_fn)(foo)
# return rank 0's output
return outputs[0]
# entrypoint is a command and ``script.py`` is the python module.
outputs = elastic_launch(LaunchConfig, "script.py")(args)
outputs = elastic_launch(LaunchConfig, "python")("script.py")
"""
def __init__(
self,
config: LaunchConfig,
entrypoint: Union[Callable, str, None],
):
self._config = config
self._entrypoint = entrypoint
def __call__(self, *args):
return launch_agent(self._config, self._entrypoint, list(args))
def _get_entrypoint_name(
entrypoint: Union[Callable, str, None], args: List[Any]
) -> str:
"""Retrive entrypoint name with the rule:
1. If entrypoint is a function, use ``entrypont.__qualname__``.
2. If entrypoint is a string, check its value:
2.1 if entrypoint equals to ``sys.executable`` (like "python"), use the first element from ``args``
which does not start with hifen letter (for example, "-u" will be skipped).
2.2 otherwise, use ``entrypoint`` value.
3. Otherwise, return empty string.
"""
if isinstance(entrypoint, Callable): # type: ignore[arg-type]
return entrypoint.__name__ # type: ignore[union-attr]
elif isinstance(entrypoint, str):
if entrypoint == sys.executable:
return next((arg for arg in args if arg[0] != "-"), "")
else:
return entrypoint
else:
return ""
def _get_addr_and_port(
rdzv_parameters: RendezvousParameters,
) -> Tuple[Optional[str], Optional[int]]:
if rdzv_parameters.backend != "static":
return (None, None)
endpoint = rdzv_parameters.endpoint
endpoint = endpoint.strip()
if not endpoint:
raise ValueError(
"Endpoint is missing in endpoint. Try to add --master_addr and --master_port"
)
master_addr, master_port = parse_rendezvous_endpoint(endpoint, default_port=-1)
if master_port == -1:
raise ValueError(
f"port is missing in endpoint: {endpoint}. Try to specify --master_port"
)
return (master_addr, master_port)
def launch_agent(
config: LaunchConfig,
entrypoint: Union[Callable, str, None],
args: List[Any],
) -> Dict[int, Any]:
if not config.run_id:
run_id = str(uuid.uuid4().int)
logger.warning(f"config has no run_id, generated a random run_id: {run_id}")
config.run_id = run_id
entrypoint_name = _get_entrypoint_name(entrypoint, args)
logger.info(
f"Starting elastic_operator with launch configs:\n"
f" entrypoint : {entrypoint_name}\n"
f" min_nodes : {config.min_nodes}\n"
f" max_nodes : {config.max_nodes}\n"
f" nproc_per_node : {config.nproc_per_node}\n"
f" run_id : {config.run_id}\n"
f" rdzv_backend : {config.rdzv_backend}\n"
f" rdzv_endpoint : {config.rdzv_endpoint}\n"
f" rdzv_configs : {config.rdzv_configs}\n"
f" max_restarts : {config.max_restarts}\n"
f" monitor_interval : {config.monitor_interval}\n"
f" log_dir : {config.log_dir}\n"
f" metrics_cfg : {config.metrics_cfg}\n"
)
rdzv_parameters = RendezvousParameters(
backend=config.rdzv_backend,
endpoint=config.rdzv_endpoint,
run_id=config.run_id,
min_nodes=config.min_nodes,
max_nodes=config.max_nodes,
**config.rdzv_configs,
)
master_addr, master_port = _get_addr_and_port(rdzv_parameters)
spec = WorkerSpec(
role=config.role,
local_world_size=config.nproc_per_node,
entrypoint=entrypoint,
args=tuple(args),
rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
max_restarts=config.max_restarts,
monitor_interval=config.monitor_interval,
redirects=config.redirects,
tee=config.tee,
master_addr=master_addr,
master_port=master_port,
)
agent = LocalElasticAgent(
spec=spec, start_method=config.start_method, log_dir=config.log_dir
)
shutdown_rdzv = True
try:
metrics.initialize_metrics(metrics.MetricsConfig(config.metrics_cfg))
result = agent.run()
# records that agent.run() has succeeded NOT that workers have succeeded
events.record(agent.get_event_succeeded())
if result.is_failed():
# ChildFailedError is treated specially by @record
# if the error files for the failed children exist
# @record will copy the first error (root cause)
# to the error file of the launcher process.
raise ChildFailedError(
name=entrypoint_name,
failures=result.failures,
)
return result.return_values
except ChildFailedError:
raise
except SignalException:
# when the agent dies with a signal do NOT shutdown the rdzv_handler
# since this closes the rendezvous on this rdzv_id permanently and
# prevents any additional scaling events
shutdown_rdzv = False
events.record(agent.get_event_failed())
raise
except Exception:
events.record(agent.get_event_failed())
raise
finally:
if shutdown_rdzv:
spec.rdzv_handler.shutdown()
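# A minimal end-to-end sketch. ``_example_trainer``, ``_example_elastic_launch``
# and the config values (single node, c10d rendezvous on localhost:29400) are
# assumptions made for illustration. The entrypoint must be importable because
# workers run in separate processes; the result maps local rank to the worker's
# return value.
def _example_trainer(msg: str) -> str:
    return f"done: {msg}"
def _example_elastic_launch() -> str:
    config = LaunchConfig(
        min_nodes=1,
        max_nodes=1,
        nproc_per_node=2,
        rdzv_backend="c10d",
        rdzv_endpoint="localhost:29400",  # assumed endpoint, adjust as needed
    )
    outputs = elastic_launch(config, _example_trainer)("hello")
    return outputs[0]  # return value of the worker with rank 0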
| pytorch-master | torch/distributed/launcher/api.py |
# Keep old package for BC purposes, this file should be removed once
# everything moves to the `torch.distributed._shard` package.
import sys
import torch
import warnings
from torch.distributed._shard.sharded_tensor import * # noqa: F403
warnings.warn(
"torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead",
DeprecationWarning
)
sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor
| pytorch-master | torch/distributed/_sharded_tensor/__init__.py |
import torch
if torch.distributed.rpc.is_available():
from .api.remote_module import RemoteModule
from .functional import * # noqa: F403
| pytorch-master | torch/distributed/nn/__init__.py |
import torch
import torch.distributed as dist
from torch.autograd import Function
# The two imports below are not always available depending on the
# USE_DISTRIBUTED compile flag. Make sure they raise import error
# if we're trying to use them.
from torch.distributed import group, ReduceOp
def broadcast(tensor, src, group=group.WORLD):
"""
Broadcasts the tensor to the whole group.
``tensor`` must have the same number of elements in all processes
participating in the collective.
Arguments:
tensor (Tensor): Data to be sent if ``src`` is the rank of current
process.
src (int): Source rank.
group (ProcessGroup, optional): The process group to work on.
Returns:
Tensor: Received tensor from the broadcast op.
"""
return _Broadcast.apply(src, group, tensor)
def gather(tensor, dst=0, group=group.WORLD):
"""
Gathers a list of tensors in a single process.
Arguments:
tensor (Tensor): Input tensor.
dst (int, optional): Destination rank (default is 0).
group (ProcessGroup, optional): The process group to work on.
Returns:
tuple[Tensor]: List of appropriately-sized tensors with the gathered data.
"""
return _Gather.apply(dst, group, tensor)
def scatter(tensors, src=0, group=group.WORLD):
"""
Scatters a list of tensors to all processes in a group.
Each process will receive exactly one tensor and store its data in the
``tensor`` argument.
Arguments:
tensors (list[Tensor]): List of tensors to scatter on the source rank.
Receivers must pass ``None``.
src (int, optional): Source rank (default is 0).
group (ProcessGroup, optional): The process group to work on.
Returns:
Tensor: Output tensor from the scatter operation.
"""
return _Scatter.apply(src, group, *tensors)
def reduce(tensor, dst, op=ReduceOp.SUM, group=group.WORLD):
"""
Reduces the tensor data across all machines.
Only the process with rank ``dst`` is going to receive the final result.
Arguments:
tensor (Tensor): Input of the collective.
dst (int): Destination rank.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on.
Returns:
Tensor: Output of the collective.
"""
return _Reduce.apply(dst, op, group, tensor)
def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=group.WORLD):
"""
Reduces, then scatters a list of tensors to all processes in a group.
Arguments:
output (Tensor): Output tensor.
input_list (list[Tensor]): List of tensors to reduce and scatter.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on.
Returns:
Tensor: Output of the collective.
"""
return _Reduce_Scatter.apply(op, group, output, *input_list)
def all_gather(tensor, group=group.WORLD):
"""
Gathers tensors from the whole group in a list.
Arguments:
tensor (Tensor): Tensor to be broadcast from current process.
group (ProcessGroup, optional): The process group to work on.
Returns:
tuple([Tensor]): Output of the collective.
"""
return _AllGather.apply(group, tensor)
def _all_gather_base(output_tensor, input_tensor, group=group.WORLD):
"""
Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor.
Args:
output_tensor (Tensor): Output tensor. It should contain
correctly-sized tensors to be used for output of the collective.
input_tensor (Tensor): Tensor to be broadcast from current process.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
Tensor: The ``output_tensor`` filled with the gathered data from all ranks.
Examples:
>>> # All tensors below are of torch.int64 dtype.
>>> # We have 2 process groups, 2 ranks.
>>> # xdoctest: +SKIP("incorrect want text")
>>> output_tensor = torch.zeros(2, dtype=torch.int64)
>>> output_tensor
[tensor([0, 0])] # Rank 0 and 1
>>> tensor = torch.arange(1, dtype=torch.int64) + 1 + rank
>>> tensor
tensor([1]) # Rank 0
tensor([2]) # Rank 1
>>> dist.all_gather_base(output_tensor, tensor)
>>> output_tensor
tensor([1,2]) # Rank 0
tensor([1,2]) # Rank 1
.. warning::
`_all_gather_base` is experimental and subject to change.
It is the caller's responsibility to ensure the output_tensor
is correctly sized.
"""
return _AllGatherBase.apply(output_tensor, input_tensor, group)
def all_to_all(output_tensor_list, input_tensor_list, group=group.WORLD):
"""
Each process scatters list of input tensors to all processes in a group and
return gathered list of tensors in output list.
Arguments:
output_tensor_list (list[Tensor]): List of tensors to gather, one per rank.
input_tensor_list (list[Tensor]): List of tensors to scatter one per rank.
group (ProcessGroup, optional): The process group to work on.
Returns:
tuple([Tensor]): Output of the collective.
"""
return _AlltoAll.apply(group, output_tensor_list, *input_tensor_list)
def all_to_all_single(
output,
input,
output_split_sizes=None,
input_split_sizes=None,
group=group.WORLD,
):
"""
Each process splits input tensor and then scatters the split list
to all processes in a group. Then concatenate the received tensors from all
the processes in the group and return single output tensor.
Arguments:
output (Tensor): Gathered concatenated output tensor.
input (Tensor): Input tensor to scatter.
output_split_sizes: (list[Int], optional): Output split sizes for dim 0
if specified None or empty, dim 0 of ``output`` tensor must divide
equally by ``world_size``.
input_split_sizes: (list[Int], optional): Input split sizes for dim 0
if specified None or empty, dim 0 of ``input`` tensor must divide
equally by ``world_size``.
Returns:
Tensor: Output of the collective.
"""
return _AlltoAllSingle.apply(
group, output, output_split_sizes, input_split_sizes, input
)
def all_reduce(tensor, op=ReduceOp.SUM, group=group.WORLD):
"""
Reduces the tensor data across all machines in such a way that all get
the final result.
After the call the returned tensor is going to be bitwise
identical in all processes.
Arguments:
tensor (Tensor): Input of the collective.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on.
Returns:
Tensor: Output of the collective
"""
return _AllReduce.apply(op, group, tensor)
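# A minimal sketch of how these autograd-aware wrappers differ from the plain
# ``torch.distributed`` collectives. ``_example_all_reduce_with_grad`` is an
# illustrative name and assumes the default process group is already initialized
# and that ``local`` is a leaf tensor with ``requires_grad=True`` on every rank.
def _example_all_reduce_with_grad(local):
    total = all_reduce(local, op=ReduceOp.SUM)
    loss = total.sum()
    loss.backward()  # the gradient flows back through the all_reduce
    return local.grad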
class _Broadcast(Function):
@staticmethod
def forward(ctx, src, group, tensor):
ctx.src = src
ctx.group = group
ctx.rank = dist.get_rank()
# torch.distributed makes all the calls in place
# we allocate new tensors to avoid this
tensor = tensor.clone()
dist.broadcast(tensor, src, group=group)
return tensor
@staticmethod
def backward(ctx, grad_output):
gx = _Reduce.apply(ctx.src, ReduceOp.SUM, ctx.group, grad_output)
if ctx.src != ctx.rank:
gx.zero_()
return (None, None, gx)
class _Gather(Function):
@staticmethod
def forward(ctx, dst, group, tensor):
ctx.dst = dst
ctx.group = group
# Need to create a list of tensors here to do the
# aggregation, get it from the group size
# tensor should be correctly sized for the method
# gathering
tensor_list = [
torch.zeros_like(tensor) for i in range(dist.get_world_size(group=group))
]
tensor = tensor.contiguous()
if dist.get_rank(group=group) == dst:
dist.gather(tensor, tensor_list, dst, group=group)
else:
dist.gather(tensor, None, dst, group=group)
return tuple(tensor_list)
@staticmethod
def backward(ctx, *grad_outputs):
return (None, None) + (_Scatter.apply(ctx.dst, ctx.group, *grad_outputs),)
class _Scatter(Function):
@staticmethod
def forward(ctx, src, group, *tensors):
ctx.src = src
ctx.group = group
assert all(t.size() == tensors[0].size() for t in tensors)
output = torch.zeros_like(tensors[0])
if dist.get_rank(group=group) == src:
dist.scatter(output, list(tensors), src, group=group)
else:
dist.scatter(output, None, src, group=group)
return output
@staticmethod
def backward(ctx, grad_output):
return (None, None) + _Gather.apply(ctx.src, ctx.group, grad_output)
class _Reduce(Function):
@staticmethod
def forward(ctx, src, op, group, tensor):
ctx.src = src
ctx.group = group
tensor = tensor.clone()
dist.reduce(tensor, src, op=op, group=group)
return tensor
@staticmethod
def backward(ctx, grad_output):
return (None, None, None) + (_Broadcast.apply(ctx.src, ctx.group, grad_output),)
class _Reduce_Scatter(Function):
@staticmethod
def forward(ctx, op, group, tensor, *input_tensor_list):
ctx.group = group
input_tensor_list = tuple(t.contiguous() for t in input_tensor_list)
dist.reduce_scatter(tensor, list(input_tensor_list), op=op, group=group)
return tensor
@staticmethod
def backward(ctx, grad_output):
return (None, None, None) + _AllGather.apply(ctx.group, grad_output)
class _AllGather(Function):
@staticmethod
def forward(ctx, group, tensor):
# Need contiguous tensors for collectives.
tensor = tensor.contiguous()
ctx.group = group
out_tensor_list = [
torch.empty_like(tensor) for _ in range(dist.get_world_size(group=group))
]
dist.all_gather(out_tensor_list, tensor, group=group)
return tuple(out_tensor_list)
@staticmethod
def backward(ctx, *grad_outputs):
if dist.get_backend(group=ctx.group) is dist.Backend.NCCL:
rank = dist.get_rank()
gx = torch.empty_like(grad_outputs[rank])
_Reduce_Scatter.apply(ReduceOp.SUM, ctx.group, gx, *grad_outputs)
else:
# As many backends don't support ReduceScatter, we use AlltoAll with .sum()
# to emulate the ReduceScatter behavior
tensor_list = [torch.empty_like(tensor) for tensor in grad_outputs]
gxs = _AlltoAll.apply(ctx.group, tensor_list, *grad_outputs)
gx = torch.sum(torch.stack(gxs), dim=0)
return (None, gx)
class _AllGatherBase(Function):
@staticmethod
def forward(ctx, output_tensor, input_tensor, group):
ctx.group = group
dist._all_gather_base(output_tensor, input_tensor.contiguous(), group=group)
return output_tensor
@staticmethod
def backward(ctx, grad_output):
if dist.get_backend(group=ctx.group) is dist.Backend.NCCL:
world_size = dist.get_world_size(group=ctx.group)
out_size = list(grad_output.size())
if out_size[0] % world_size != 0:
raise RuntimeError(
f'Tensor with dimensions: {out_size} does '
f'not have first dimension divisible by world_size: {world_size}'
)
out_size[0] = out_size[0] // dist.get_world_size(group=ctx.group)
gx = torch.empty(out_size, device=grad_output.device, dtype=grad_output.dtype)
dist._reduce_scatter_base(gx, grad_output, ReduceOp.SUM, ctx.group)
else:
raise RuntimeError("Backend not supported!")
return (None, gx, None)
class _AlltoAll(Function):
@staticmethod
def forward(ctx, group, out_tensor_list, *tensors):
ctx.group = group
ctx.input_tensor_size_list = [
tensors[i].size() for i in range(dist.get_world_size(group=group))
]
my_rank = dist.get_rank(group=group)
tensors = tuple(t.contiguous() for t in tensors)
# Implement it by means of scatter/gather; async send/recv operations have issues
if dist.get_backend(group=group) is dist.Backend.GLOO:
for i in range(dist.get_world_size(group=group)):
to_send = None
if i == my_rank:
to_send = list(tensors)
dist.scatter(out_tensor_list[i], to_send, i, group=group)
else:
dist.all_to_all(
out_tensor_list,
list(tensors),
group=group,
)
return tuple(out_tensor_list)
@staticmethod
def backward(ctx, *grad_outputs):
tensor_list = [
torch.empty(size, device=grad_outputs[0].device, dtype=grad_outputs[0].dtype)
for size in ctx.input_tensor_size_list
]
return (None, None) + _AlltoAll.apply(ctx.group, tensor_list, *grad_outputs)
class _AlltoAllSingle(Function):
@staticmethod
def forward(ctx, group, output, output_split_sizes, input_split_sizes, input):
ctx.group = group
ctx.input_size = input.size()
ctx.output_split_sizes = input_split_sizes
ctx.input_split_sizes = output_split_sizes
dist.all_to_all_single(
output,
input,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
)
return output
@staticmethod
def backward(ctx, grad_output):
tensor = torch.empty(ctx.input_size, device=grad_output.device, dtype=grad_output.dtype)
return (None, None, None, None) + (
_AlltoAllSingle.apply(
ctx.group,
tensor,
ctx.output_split_sizes,
ctx.input_split_sizes,
grad_output.contiguous(),
),
)
class _AllReduce(Function):
@staticmethod
def forward(ctx, op, group, tensor):
ctx.group = group
ctx.op = op
tensor = tensor.clone()
dist.all_reduce(tensor, op=op, group=group)
return tensor
@staticmethod
def backward(ctx, grad_output):
return (None, None) + (_AllReduce.apply(ctx.op, ctx.group, grad_output),)
| pytorch-master | torch/distributed/nn/functional.py |
#!/usr/bin/python3
import importlib
import logging
import os
import sys
import tempfile
from typing import Optional
import torch
from torch.distributed.nn.jit.templates.remote_module_template import (
get_remote_module_template,
)
logger = logging.getLogger(__name__)
_FILE_PREFIX = "_remote_module_"
_TEMP_DIR = tempfile.TemporaryDirectory()
INSTANTIATED_TEMPLATE_DIR_PATH = _TEMP_DIR.name
logger.info(f"Created a temporary directory at {INSTANTIATED_TEMPLATE_DIR_PATH}")
sys.path.append(INSTANTIATED_TEMPLATE_DIR_PATH)
def get_arg_return_types_from_interface(module_interface):
assert getattr(
module_interface, "__torch_script_interface__", False
), "Expect a TorchScript class interface decorated by @torch.jit.interface."
qualified_name = torch._jit_internal._qualified_name(module_interface)
cu = torch.jit._state._python_cu
module_interface_c = cu.get_interface(qualified_name)
assert (
"forward" in module_interface_c.getMethodNames()
), "Expect forward in interface methods, while it has {}".format(
module_interface_c.getMethodNames()
)
method_schema = module_interface_c.getMethod("forward")
arg_str_list = []
arg_type_str_list = []
assert method_schema is not None
for argument in method_schema.arguments:
arg_str_list.append(argument.name)
if argument.has_default_value():
default_value_str = " = {}".format(argument.default_value)
else:
default_value_str = ""
arg_type_str = "{name}: {type}{default_value}".format(
name=argument.name, type=argument.type, default_value=default_value_str
)
arg_type_str_list.append(arg_type_str)
arg_str_list = arg_str_list[1:] # Remove "self".
args_str = ", ".join(arg_str_list)
arg_type_str_list = arg_type_str_list[1:] # Remove "self".
arg_types_str = ", ".join(arg_type_str_list)
assert len(method_schema.returns) == 1
argument = method_schema.returns[0]
return_type_str = str(argument.type)
return args_str, arg_types_str, return_type_str
def _write(out_path, text):
old_text: Optional[str]
try:
with open(out_path, "r") as f:
old_text = f.read()
except IOError:
old_text = None
if old_text != text:
with open(out_path, "w") as f:
logger.info("Writing {}".format(out_path))
f.write(text)
else:
logger.info("Skipped writing {}".format(out_path))
def _do_instantiate_remote_module_template(
generated_module_name, str_dict, enable_moving_cpu_tensors_to_cuda
):
generated_code_text = get_remote_module_template(
enable_moving_cpu_tensors_to_cuda
).format(**str_dict)
out_path = os.path.join(
INSTANTIATED_TEMPLATE_DIR_PATH, f"{generated_module_name}.py"
)
_write(out_path, generated_code_text)
# From importlib doc,
# > If you are dynamically importing a module that was created since
# the interpreter began execution (e.g., created a Python source file),
# you may need to call invalidate_caches() in order for the new module
# to be noticed by the import system.
importlib.invalidate_caches()
generated_module = importlib.import_module(f"{generated_module_name}")
return generated_module
def instantiate_scriptable_remote_module_template(
module_interface_cls, enable_moving_cpu_tensors_to_cuda=True
):
if not getattr(module_interface_cls, "__torch_script_interface__", False):
raise ValueError(
f"module_interface_cls {module_interface_cls} must be a type object decorated by "
"@torch.jit.interface"
)
# Generate the template instance name.
module_interface_cls_name = torch._jit_internal._qualified_name(
module_interface_cls
).replace(".", "_")
generated_module_name = f"{_FILE_PREFIX}{module_interface_cls_name}"
# Generate type annotation strs.
assign_module_interface_cls_str = (
f"from {module_interface_cls.__module__} import "
f"{module_interface_cls.__name__} as module_interface_cls"
)
args_str, arg_types_str, return_type_str = get_arg_return_types_from_interface(
module_interface_cls
)
kwargs_str = ""
arrow_and_return_type_str = f" -> {return_type_str}"
arrow_and_future_return_type_str = f" -> Future[{return_type_str}]"
str_dict = dict(
assign_module_interface_cls=assign_module_interface_cls_str,
arg_types=arg_types_str,
arrow_and_return_type=arrow_and_return_type_str,
arrow_and_future_return_type=arrow_and_future_return_type_str,
args=args_str,
kwargs=kwargs_str,
jit_script_decorator="@torch.jit.script",
)
return _do_instantiate_remote_module_template(
generated_module_name, str_dict, enable_moving_cpu_tensors_to_cuda
)
def instantiate_non_scriptable_remote_module_template():
generated_module_name = f"{_FILE_PREFIX}non_scriptable"
str_dict = dict(
assign_module_interface_cls="module_interface_cls = None",
args="*args",
kwargs="**kwargs",
arg_types="*args, **kwargs",
arrow_and_return_type="",
arrow_and_future_return_type="",
jit_script_decorator="",
)
# For a non-scriptable template, always enable moving CPU tensors to a cuda device,
# because there is no syntax limitation on the extra handling caused by the script.
return _do_instantiate_remote_module_template(generated_module_name, str_dict, True)
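# A small usage sketch; ``_example_instantiate_non_scriptable`` is an illustrative
# name only. The non-scriptable template needs no TorchScript interface, and the
# generated module exposes ``_generated_methods`` (``forward_async`` and ``forward``)
# that RemoteModule later binds onto its instances.
def _example_instantiate_non_scriptable():
    generated = instantiate_non_scriptable_remote_module_template()
    return [method.__name__ for method in generated._generated_methods]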
| pytorch-master | torch/distributed/nn/jit/instantiator.py |
| pytorch-master | torch/distributed/nn/jit/__init__.py |
| pytorch-master | torch/distributed/nn/jit/templates/__init__.py |
#!/usr/bin/python3
def get_remote_module_template(enable_moving_cpu_tensors_to_cuda: bool):
return _TEMPLATE_PREFIX + (
_REMOTE_FORWARD_TEMPLATE_ENABLE_MOVING_CPU_TENSORS_TO_CUDA
if enable_moving_cpu_tensors_to_cuda
else _REMOTE_FORWARD_TEMPLATE
)
_TEMPLATE_PREFIX = """from typing import *
import torch
import torch.distributed.rpc as rpc
from torch import Tensor
from torch._jit_internal import Future
from torch.distributed.rpc import RRef
from typing import Tuple # pyre-ignore: unused import
{assign_module_interface_cls}
def forward_async(self, {arg_types}){arrow_and_future_return_type}:
args = (self.module_rref, self.device, self.is_device_map_set, {args})
kwargs = {{{kwargs}}}
return rpc.rpc_async(
self.module_rref.owner(),
_remote_forward,
args,
kwargs,
)
def forward(self, {arg_types}){arrow_and_return_type}:
args = (self.module_rref, self.device, self.is_device_map_set, {args})
kwargs = {{{kwargs}}}
ret_fut = rpc.rpc_async(
self.module_rref.owner(),
_remote_forward,
args,
kwargs,
)
return ret_fut.wait()
_generated_methods = [
forward_async,
forward,
]
{jit_script_decorator}
"""
# This template may cause a typing error (a mismatch between ``Tuple[()]`` and ``Tuple[Any]``)
# even if the code is only used for instantiation and not execution.
# Therefore, only include the handling that moves CPU tensors to a cuda device when necessary.
# TODO: Merge these two templates together in the future once TorchScript syntax is improved.
_REMOTE_FORWARD_TEMPLATE_ENABLE_MOVING_CPU_TENSORS_TO_CUDA = """
def _remote_forward(
module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, {arg_types}){arrow_and_return_type}:
module = module_rref.local_value()
device = torch.device(device)
if device.type != "cuda":
return module.forward({args}, {kwargs})
# If the module is on a cuda device,
# move any CPU tensor in args or kwargs to the same cuda device.
# Since torch script does not support generator expression,
# have to use concatenation instead of
# ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
args = ({args},)
out_args: Tuple[()] = ()
for arg in args:
arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
out_args = out_args + arg
kwargs = {{{kwargs}}}
for k, v in kwargs.items():
if isinstance(v, Tensor):
kwargs[k] = kwargs[k].to(device)
if is_device_map_set:
return module.forward(*out_args, {kwargs})
# If the device map is empty, then only CPU tensors are allowed to send over wire,
# so have to move any GPU tensor to CPU in the output.
# Since torch script does not support generator expression,
# have to use concatenation instead of
# ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, {kwargs}))``.
ret: Tuple[()] = ()
for i in module.forward(*out_args, {kwargs}):
i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
ret = ret + i
return ret
"""
_REMOTE_FORWARD_TEMPLATE = """
def _remote_forward(
module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, {arg_types}){arrow_and_return_type}:
module = module_rref.local_value()
return module.forward({args}, {kwargs})
"""
| pytorch-master | torch/distributed/nn/jit/templates/remote_module_template.py |
#!/usr/bin/python3
import collections
import io
import sys
import types
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import torch
import torch.distributed.rpc as rpc
from torch import Tensor, device, dtype, nn
from torch.distributed.nn.jit import instantiator
from torch.distributed import _remote_device
from torch.distributed.rpc.internal import _internal_rpc_pickler
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.utils.hooks import RemovableHandle
_grad_t = Union[Tuple[Tensor, ...], Tensor]
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be
# the type of the subclass, not the looser type of `Module`.
T = TypeVar("T", bound="Module")
_NON_SCRIPTABLE_REMOTE_MODULE_MODULE = (
instantiator.instantiate_non_scriptable_remote_module_template()
)
_REMOTE_MODULE_PICKLED_ATTRIBUTES = (
"on",
"device",
"is_device_map_set",
"is_scriptable",
"generated_methods",
"module_rref",
)
_SerializedRemoteModule = collections.namedtuple("_SerializedRemoteModule", _REMOTE_MODULE_PICKLED_ATTRIBUTES) # type: ignore[misc]
# These attributes are mostly from RemoteModule's parent class and are intentionally not pickled.
# A new attribute of RemoteModule should be either in _REMOTE_MODULE_PICKLED_ATTRIBUTES
# or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
# Otherwise, it will not be pickled.
_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING = (
"training",
"_parameters",
"_buffers",
"_non_persistent_buffers_set",
"_backward_hooks",
"_is_full_backward_hook",
"_forward_hooks",
"_forward_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
"_modules",
# The two attributes below are generated methods, not available at pickling time.
"forward_async",
"forward",
)
# RPC handler.
def _instantiate_template(module_interface_cls, enable_moving_cpu_tensors_to_cuda):
instantiator.instantiate_scriptable_remote_module_template(
module_interface_cls, enable_moving_cpu_tensors_to_cuda
)
def _create_module(module_cls, args, kwargs, device):
module = module_cls(*args, **kwargs)
if not isinstance(module, nn.Module):
raise ValueError(
"Expect `module_cls(*args, **kwargs)` returns an instance of <class nn.Module>, "
f"but it returns an instance of {type(module)}."
)
module.to(device)
return module
def _create_module_with_interface(
module_cls, args, kwargs, device, module_interface_cls
):
module = _create_module(module_cls, args, kwargs, device)
if module_interface_cls is not None:
module = torch.jit.script(module)
return rpc.RRef(module, module_interface_cls)
def _param_rrefs(module_rref, recurse) -> List[rpc.RRef[Parameter]]:
ret: List[rpc.RRef[Parameter]] = []
for param in module_rref.local_value().parameters(recurse):
ret.append(rpc.RRef(param))
return ret
def _raise_not_supported(name: str) -> None:
raise ValueError("Method ``{}`` not supported for RemoteModule".format(name))
class _RemoteModule(nn.Module):
def __new__(cls, *args, **kwargs):
# Use __new__ for logging purposes.
torch._C._log_api_usage_once("torch.distributed.nn.api.remote_module")
return super(_RemoteModule, cls).__new__(cls)
def __init__(
self,
remote_device: str,
module_cls: Type[nn.Module],
args: Tuple = None,
kwargs: Dict[str, Any] = None,
_module_interface_cls: Any = None,
):
"""
A RemoteModule instance can only be created after RPC initialization.
It creates a user-specified module on a specified remote node.
It behaves like a regular ``nn.Module`` except that the ``forward`` method is
executed on the remote node.
It takes care of autograd recording to ensure the backward pass propagates
gradients back to the corresponding remote module.
It can be shared across processes using the `RPC framework <https://pytorch.org/docs/stable/rpc.html>`__,
without incurring any overhead of copying the actual module,
which is equivalent to an :class:`~torch.distributed.rpc.RRef`
pointing to the remote module.
The arguments of ``forward_async`` and ``forward`` are the same as
the ``forward`` method of the module returned by the ``module_cls``.
Apart from ``forward_async`` and ``forward``, no other methods are supported from nn.Module for now.
Particularly, to create a hybrid model, typically the local modules should be
created outside of remote modules, rather than as submodules of any remote module (by calling ``add_module``).
Hybrid Example:
>>> class HybridModel(nn.Module):
>>> def __init__(self):
>>> nn.Module.__init__(self)
>>> self.remote_embedding = RemoteModule(...)
>>> self.local_linear = nn.Linear(...)
For example, if ``module_cls`` returns an instance of ``nn.Linear``,
that has ``forward`` method signature, ``def forward(input: Tensor) -> Tensor:``,
the generated ``RemoteModule`` will have 2 methods in signature of
``def forward(input: Tensor) -> Tensor:`` and
``def forward_async(input: Tensor) -> Future[Tensor]:``.
.. note::
If the remote module is placed on a cuda device,
any input CPU tensors will be automatically moved to the same cuda device,
and GPU tensors are returned over the wire according to the device map of the remote worker on TensorPipe RPC backend.
Args:
remote_device (str): Device on the destination worker where we'd like to place this module.
The device can be a local device or a remote device specified by one of the following remote
formats:
1. "rank:<rank>/<device>" (ex: "rank:0/cuda:0").
2. "<worker_name>/<device>" (ex: "trainer0/cuda:0").
In addition, the device field can be optional and the default value is "cpu".
module_cls (nn.Module): For example,
>>> class MyModule(nn.Module):
>>> def forward(input):
>>> return input + 1
>>>
>>> module_cls = MyModule
args (Sequence, optional): args to be passed to ``module_cls``.
kwargs (Dict, optional): kwargs to be passed to ``module_cls``.
_module_interface_cls (type, optional): The TorchScript interface type for the module
to be created. The type object should be decorated by @torch.jit.interface.
If not provided, the generated RemoteModule is not torchscript-able.
Warning, this is an experimental API and susceptible to frequent changes.
Returns:
A remote module instance which wraps the :class:`~nn.Module` created by the
user-provided ``module_cls``, it has a blocking ``forward`` method and an
asynchronous ``forward_async`` method that returns a future of the ``forward`` call
on the user-provided module on the remote side.
Example::
Run the following code in two different processes:
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> from torch import nn, Tensor
>>> from torch.distributed.nn.api.remote_module import RemoteModule
>>>
>>> # xdoctest: +SKIP
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> remote_linear_module = RemoteModule(
>>> "worker1/cpu", nn.Linear, args=(20, 30),
>>> )
>>> input = torch.randn(128, 20)
>>> ret_fut = remote_linear_module.forward_async(input)
>>> ret = ret_fut.wait()
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>>
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
"""
super().__init__()
enable_moving_cpu_tensors_to_cuda = self._prepare_init(remote_device)
# Default arguments preparation.
args = args if args is not None else ()
kwargs = kwargs if kwargs is not None else {}
if _module_interface_cls is not None:
# Users rely on this field to know if this generated RemoteModule is TorchScript-able.
self.is_scriptable = True
# Instantiate template on remote side.
fut = rpc.rpc_async(
self.on,
_instantiate_template,
(_module_interface_cls, enable_moving_cpu_tensors_to_cuda),
)
self._init_template(
_module_interface_cls, enable_moving_cpu_tensors_to_cuda
)
# Instantiate template on remote side.
fut = rpc.rpc_async(
self.on,
_instantiate_template,
(_module_interface_cls, enable_moving_cpu_tensors_to_cuda),
)
# Create the module on the remote side.
fut.wait() # Ensure remote_module_cls is available on remote side.
# TODO: We need to change this to rpc.remote, and make it async (see the else branch below).
# For that we need to be able to apply _module_interface_cls to the RRef returned by rpc.remote
# See https://github.com/pytorch/pytorch/issues/58098 for more context.
self.module_rref = rpc.rpc_sync(
self.on,
_create_module_with_interface,
(module_cls, args, kwargs, self.device, _module_interface_cls),
)
else:
self.is_scriptable = False
self.generated_methods = (
_NON_SCRIPTABLE_REMOTE_MODULE_MODULE._generated_methods
)
# Create the module on the remote side.
self.module_rref = rpc.remote(
self.on,
_create_module,
(module_cls, args, kwargs, self.device),
)
self._install_generated_methods()
self._check_attribute_picklability()
def remote_parameters(self, recurse: bool = True) -> List[rpc.RRef[Parameter]]:
"""
Returns a list of :class:`~torch.distributed.rpc.RRef` pointing to the
remote module's parameters. This can typically be used in conjunction
with :class:`~torch.distributed.optim.DistributedOptimizer`.
Args:
recurse (bool): if True, then returns parameters of the remote
module and all submodules of the remote module. Otherwise,
returns only parameters that are direct members of the
remote module.
Returns:
A list of :class:`~torch.distributed.rpc.RRef` (``List[RRef[nn.Parameter]]``)
to remote module's parameters.
"""
return rpc.rpc_sync(self.on, _param_rrefs, args=(self.module_rref, recurse))
def get_module_rref(self) -> rpc.RRef[nn.Module]:
"""
Returns an :class:`~torch.distributed.rpc.RRef` (``RRef[nn.Module]``)
pointing to the remote module.
"""
return self.module_rref
@torch.jit.export
def __getstate__(self):
raise RuntimeError(
"Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC"
)
@torch.jit.export
def __setstate__(self, state):
raise RuntimeError(
"Cannot unpickle RemoteModule in python pickler. RemoteModule can only be unpickled when using RPC"
)
def register_buffer(
self, name: str, tensor: Optional[Tensor], persistent: bool = True
) -> None:
_raise_not_supported(self.register_buffer.__name__)
def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
_raise_not_supported(self.register_parameter.__name__)
def add_module(self, name: str, module: Optional[Module]) -> None:
_raise_not_supported(self.add_module.__name__)
def apply(self: T, fn: Callable[[Module], None]) -> T: # type: ignore[return]
_raise_not_supported(self.apply.__name__)
def cuda(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return]
_raise_not_supported(self.cuda.__name__)
def ipu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return]
_raise_not_supported(self.ipu.__name__)
def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return]
_raise_not_supported(self.xpu.__name__)
def cpu(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.cpu.__name__)
def type(self: T, dst_type: Union[dtype, str]) -> T: # type: ignore[return]
_raise_not_supported(self.type.__name__)
def float(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.float.__name__)
def double(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.double.__name__)
def half(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.half.__name__)
def bfloat16(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.bfloat16.__name__)
def to(self, *args, **kwargs) -> T: # type: ignore[return]
_raise_not_supported(self.to.__name__)
def register_backward_hook( # type: ignore[return]
self, hook: Callable[[Module, _grad_t, _grad_t], Union[None, Tensor]]
) -> RemovableHandle:
_raise_not_supported(self.register_backward_hook.__name__)
def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle: # type: ignore[return]
_raise_not_supported(self.register_forward_pre_hook.__name__)
def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle: # type: ignore[return]
_raise_not_supported(self.register_forward_hook.__name__)
def state_dict(self, *args, **kwargs):
_raise_not_supported(self.state_dict.__name__)
def load_state_dict(
self,
state_dict: Mapping[str, Any],
strict: bool = True,
):
_raise_not_supported(self.load_state_dict.__name__)
def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
raise ValueError(
"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead."
)
def named_parameters( # type: ignore[return]
self, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, Parameter]]:
_raise_not_supported(self.named_parameters.__name__)
def buffers(self, recurse: bool = True) -> Iterator[Tensor]: # type: ignore[return]
_raise_not_supported(self.buffers.__name__)
def named_buffers( # type: ignore[return]
self, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, Tensor]]:
_raise_not_supported(self.named_buffers.__name__)
def children(self) -> Iterator[Module]: # type: ignore[return]
_raise_not_supported(self.children.__name__)
def named_children(self) -> Iterator[Tuple[str, Module]]: # type: ignore[return]
_raise_not_supported(self.named_children.__name__)
def modules(self) -> Iterator[Module]: # type: ignore[return]
_raise_not_supported(self.modules.__name__)
def named_modules(
self,
memo: Optional[Set[Module]] = None,
prefix: str = "",
remove_duplicate: bool = True,
):
_raise_not_supported(self.named_modules.__name__)
def train(self: T, mode: bool = True) -> T:
return self.module_rref.rpc_sync().train() # type: ignore[operator, union-attr]
def eval(self: T) -> T:
return self.module_rref.rpc_sync().eval() # type: ignore[operator, union-attr]
def requires_grad_(self: T, requires_grad: bool = True) -> T: # type: ignore[return]
_raise_not_supported(self.requires_grad_.__name__)
def zero_grad(self, set_to_none: bool = False) -> None:
_raise_not_supported(self.zero_grad.__name__)
def share_memory(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.share_memory.__name__)
def extra_repr(self) -> str: # type: ignore[return]
_raise_not_supported(self.extra_repr.__name__)
def _prepare_init(self, remote_device_str: str) -> bool:
"""
Prepares the initialization and returns whether to enable automatically moving CPU tensors to CUDA devices.
"""
# Sanity check.
assert rpc._is_current_rpc_agent_set(), "RemoteModule only works in RPC."
remote_device = _remote_device(remote_device_str)
self.on = remote_device.worker_name() if remote_device.worker_name() is not None else remote_device.rank()
self.device = str(remote_device.device())
agent = rpc._get_current_rpc_agent()
# If the device map of the remote worker is set,
# then enable moving any input CPU tensors to the same cuda device.
self.is_device_map_set = bool(
agent._get_device_map(agent.get_worker_info(self.on)) # type: ignore[arg-type]
)
# ``enable_moving_cpu_tensors_to_cuda`` is less strict than ``is_device_map_set``:
# If ``enable_moving_cpu_tensors_to_cuda`` is true, but the device map is not set,
# then any CPU tensors can still be moved to a cuda device to run forward,
# but the output must be moved back to CPU before being sent over the wire.
enable_moving_cpu_tensors_to_cuda = torch.device(self.device).type == "cuda"
return enable_moving_cpu_tensors_to_cuda
def _init_template(self, module_interface_cls, enable_moving_cpu_tensors_to_cuda):
"""
Instantiates template on local side.
"""
generated_module = instantiator.instantiate_scriptable_remote_module_template(
module_interface_cls, enable_moving_cpu_tensors_to_cuda
)
self.generated_methods = generated_module._generated_methods
def _check_attribute_picklability(self):
"""
Checks that every attribute has explicitly defined whether it should be pickled (i.e., its picklability).
"""
for k in self.__dict__.keys():
if (
k not in _REMOTE_MODULE_PICKLED_ATTRIBUTES
and k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING
):
raise AttributeError(
"Attribute {} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or "
"``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format(k)
)
def _install_generated_methods(self):
for method in self.generated_methods:
method_name = method.__name__
method = torch.jit.export(method)
setattr(self, method_name, types.MethodType(method, self))
@staticmethod
def init_from_module_rref(
remote_device: str,
module_rref: rpc.RRef[nn.Module],
_module_interface_cls: Any = None,
):
"""
Besides the constructor, a RemoteModule instance can also be initialized given a module RRef.
This alternate initialization method can be particularly useful if we want to create multiple
RemoteModule instances that share the same underlying module and reduce memory consumption.
Moreover, this also provides a workaround for passing script RemoteModule over RPC,
which is not supported. The recommended way is as follows:
1. the sender creates a RemoteModule;
2. the sender sends its ``module_rref`` over RPC;
3. the receiver calls this method to initialize another RemoteModule using the same ``module_rref``.
Example::
Run the following code in two different processes:
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> from torch import nn, Tensor
>>> from torch.distributed.nn.api.remote_module import RemoteModule
>>>
>>> # xdoctest: +SKIP
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> remote_module = RemoteModule(
>>> "worker1/cpu", nn.Linear, args=(20, 30),
>>> )
>>>
>>> remote_module1 = rpc.rpc_sync(
>>> "worker1",
>>> RemoteModule.init_from_module_rref,
>>> ("worker1/cpu", remote_module.get_module_rref()),
>>> )
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>>
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
Args:
remote_device (str): Device on the destination worker where we'd like to place this module.
The device can be a local device or a remote device specified by one of the following remote
formats:
1. "rank:<rank>/<device>" (ex: "rank:0/cuda:0").
2. "<worker_name>/<device>" (ex: "trainer0/cuda:0").
In addition, the device field can be optional and the default value is "cpu".
module_rref (RRef[nn.Module]): The module reference shared by both the caller and
the created remote module.
_module_interface_cls (type, optional): The TorchScript interface type for the module
to be created. The type object should be decorated by @torch.jit.interface.
If not provided, the generated RemoteModule is not torchscript-able.
Warning, this is an experimental API and susceptible to frequent changes.
Returns:
A remote module instance which wraps the :class:`~nn.Module` created by the
user-provided ``module_rref``. It has a blocking ``forward`` method and an
asynchronous ``forward_async`` method that returns a future of the ``forward`` call
on the user-provided module on the remote side.
"""
# NOTE: if a new attribute is added to this class, also need to add it
# to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` for pickling/unpickling.
remote_module = object.__new__(RemoteModule)
enable_moving_cpu_tensors_to_cuda = remote_module._prepare_init(remote_device)
if _module_interface_cls is not None:
# Users rely on this field to know if this generated RemoteModule is TorchScript-able.
remote_module.is_scriptable = True
remote_module._init_template(
_module_interface_cls, enable_moving_cpu_tensors_to_cuda
)
else:
remote_module.is_scriptable = False
remote_module.generated_methods = (
_NON_SCRIPTABLE_REMOTE_MODULE_MODULE._generated_methods
)
remote_module.module_rref = module_rref
remote_module._install_generated_methods()
remote_module._check_attribute_picklability()
return remote_module
class RemoteModule(_RemoteModule):
"""
A RemoteModule instance can only be created after RPC initialization.
It creates a user-specified module on a specified remote node.
It behaves like a regular ``nn.Module`` except that the ``forward`` method is
executed on the remote node.
It takes care of autograd recording to ensure the backward pass propagates
gradients back to the corresponding remote module.
It generates two methods ``forward_async`` and ``forward`` based on the
signature of the ``forward`` method of ``module_cls``. ``forward_async``
runs asynchronously and returns a Future. The arguments of ``forward_async``
and ``forward`` are the same as the ``forward`` method of the module
returned by the ``module_cls``.
For example, if ``module_cls`` returns an instance of ``nn.Linear``,
that has ``forward`` method signature: ``def forward(input: Tensor) -> Tensor:``,
the generated ``RemoteModule`` will have 2 methods with the signatures:
| ``def forward(input: Tensor) -> Tensor:``
| ``def forward_async(input: Tensor) -> Future[Tensor]:``
Args:
remote_device (str): Device on the destination worker where we'd like to place this module.
The format should be "<workername>/<device>", where the device field can be parsed as torch.device type.
E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
In addition, the device field can be optional and the default value is "cpu".
module_cls (nn.Module): Class for the module to be created remotely. For example,
>>> class MyModule(nn.Module):
>>> def forward(self, input):
>>> return input + 1
>>>
>>> module_cls = MyModule
args (Sequence, optional): args to be passed to ``module_cls``.
kwargs (Dict, optional): kwargs to be passed to ``module_cls``.
Returns:
A remote module instance which wraps the :class:`~nn.Module` created by the
user-provided ``module_cls``. It has a blocking ``forward`` method and an
asynchronous ``forward_async`` method that returns a future of the ``forward`` call
on the user-provided module on the remote side.
Example::
Run the following code in two different processes:
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> from torch import nn, Tensor
>>> from torch.distributed.nn.api.remote_module import RemoteModule
>>>
>>> # xdoctest: +SKIP
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> remote_linear_module = RemoteModule(
>>> "worker1/cpu", nn.Linear, args=(20, 30),
>>> )
>>> input = torch.randn(128, 20)
>>> ret_fut = remote_linear_module.forward_async(input)
>>> ret = ret_fut.wait()
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>>
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
Furthermore, a more practical example that is combined with
`DistributedDataParallel <https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel>`__ (DDP)
can be found in this `tutorial <https://pytorch.org/tutorials/advanced/rpc_ddp_tutorial.html>`__.
"""
def __init__(
self,
remote_device: str,
module_cls: Type[nn.Module],
args: Tuple = None,
kwargs: Dict[str, Any] = None,
):
super().__init__(remote_device, module_cls, args, kwargs)
def _remote_module_receiver(
*remote_module_pickled_attrs,
):
"""
Deserializes a RemoteModule.
"""
serialized_remote_module = _SerializedRemoteModule._make(
remote_module_pickled_attrs
)
m = object.__new__(RemoteModule)
m.__dict__.update(serialized_remote_module._asdict())
# Unpickling the attribute `module_rref` must invoke RRef's `_deserialize()` method.
m.module_rref = rpc.PyRRef._deserialize(m.module_rref)
# Install generated methods when unpickled.
for method in m.generated_methods:
method_name = method.__name__
method = torch.jit.export(method)
setattr(m, method_name, types.MethodType(method, m))
return m
def _remote_module_reducer(remote_module):
"""
Serializes a RemoteModule.
"""
pickled_attrs = {}
for k, v in remote_module.__dict__.items():
# Pickling the attribute `module_rref` must invoke RRef's `_serialize()` method.
if k == "module_rref":
pickled_attrs[k] = v._serialize()
elif k in _REMOTE_MODULE_PICKLED_ATTRIBUTES:
pickled_attrs[k] = v
# Check if unpickled attributes are all in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
elif k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING:
print(
"The new attribute ``{}`` of RemoteModule is ignored during RPC pickling. "
"To pickle this attribute, please add it to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES``. "
"Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format(
k
),
file=sys.stderr,
)
return (
_remote_module_receiver,
tuple(pickled_attrs.values()),
)
def _recursive_script_module_receiver(
recursive_script_module_serialized,
):
"""
Deserializes a RecursiveScriptModule that does not contain a script RemoteModule.
"""
f = io.BytesIO(recursive_script_module_serialized)
m = torch.jit.load(f)
return m
def _recursive_script_module_reducer(recursive_script_module):
"""
Serializes a RecursiveScriptModule that does not contain a script RemoteModule,
and raises an error otherwise.
"""
if hasattr(recursive_script_module._c, "module_rref"):
raise RuntimeError(
"Passing a script RemoteModule over RPC is not supported. Please create a RemoteModule in the sender, "
"send the `module_rref` to the receiver, and create a new instance on the receiver end by passing this `module_rref`."
)
f = io.BytesIO()
torch.jit.save(recursive_script_module, f)
return (_recursive_script_module_receiver, (f.getvalue(),))
_internal_rpc_pickler._register_reducer(RemoteModule, _remote_module_reducer)
_internal_rpc_pickler._register_reducer(
torch.jit.RecursiveScriptModule, _recursive_script_module_reducer
)
| pytorch-master | torch/distributed/nn/api/remote_module.py |
pytorch-master | torch/distributed/nn/api/__init__.py |
|
#!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Torchelastic agent and user worker failover contract:
**TL;DR;**:
* TE (torchelastic) expects user workers to finish within a 5 minute drift of each other
* It is better to design a DDP app to fail for all workers, rather than a single one.
* TE does not synchronize number of restarts between agents
* TE re-rendezvous does not trigger restart decrease
* When a single agent finishes its job (successfully or not), it will close rendezvous.
If other agents still have workers in progress, they will be terminated.
* Based on the above, scale down does not work if at least one agent finishes the job.
* When scale up is detected by agents, it will not decrease ``max_restarts``
In general TE (torchelastic) can launch arbitrary user code, but some
clarification is needed around what failover mechanism torchelastic
provides and what failover mechanism it expects from user workers.
Torchelastic currently supports DDP style applications. That means that
TE expects *ALL* workers to finish at approximately the same time. In practice,
it is nearly impossible to guarantee that all workers in an arbitrary
DDP application finish at the same time, so TE provides a finalization barrier
that waits for TIMEOUT (5 minutes) for worker finalization.
**Worker Failure**
When a worker fails, TE will check the number of restarts
available; if there is more than 0 restarts, TE will start a new rendezvous
round and restart the worker process. A new rendezvous round will cause other
TE agents to terminate their workers.
.. note:: TE agents do not synchronize restarts between themselves.
When a single agent performs a restart, it will trigger a local ``max_restarts``
decrease; other agents will not decrease their ``max_restarts``.
A single worker failure can cause the whole cluster to fail:
If a single worker is constantly failing, it will cause the TE agent
``max_restarts`` to go to zero. This will cause an agent to finish its
work and close rendezvous. If there are any other workers on different
agents, they will be terminated.
**Re-Rendezvous**
Re-rendezvous occurs when TE agents detect a new node
trying to join a cluster. TE will not decrease ``max_restarts``. TE agents
will terminate their workers and start a new rendezvous round.
Note about DynamicRendezvous(etcd-v2, c10d-experimental): If the rendezvous
has already max_nodes, the new node won't be added to the wait list right
away since there is no need to tear down a rendezvous that is already fully
utilized. The new node will wait until its timeout (600 secs by default)
and periodically check the number of participants. If the number becomes
less than max_nodes, it will be added to the wait list; otherwise, it will time out after 600 secs.
*Scale up event*. When scale up event happens, torchelastic rendezvous
will detect that there are new nodes trying to join. Torchelastic agent
will stop all workers and perform re-rendezvous. Note: when scale up event
happens, *``max_restarts``* will *not* decrease.
*Scale down event*. When scale down event happens, rendezvous will not
notify the torchelastic agent about it. If the TE agent is launched with ``max_restarts=0``,
it relies on the underlying scheduler to handle the job restart. If ``max_restarts>0``,
the TE agent will terminate workers and start a new rdzv round, which is a *Scale up event*.
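The failover behaviors above are driven by a few launch-time knobs. The sketch
below shows where ``max_restarts`` and the elastic node range are typically set
when using the elastic launcher API (``torch.distributed.launcher.api``); the
run id, rendezvous endpoint and the ``trainer_fn`` entrypoint are illustrative
placeholders.
::
    from torch.distributed.launcher.api import LaunchConfig, elastic_launch
    def trainer_fn():
        # DDP-style user training code goes here.
        ...
    config = LaunchConfig(
        min_nodes=1,
        max_nodes=4,
        nproc_per_node=8,
        run_id="my_job",
        rdzv_backend="c10d",
        rdzv_endpoint="localhost:29400",
        max_restarts=3,
    )
    elastic_launch(config, trainer_fn)()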
"""
| pytorch-master | torch/distributed/elastic/__init__.py |
#!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Metrics API
**Overview**:
The metrics API in torchelastic is used to publish telemetry metrics.
It is designed to be used by torchelastic's internal modules to
publish metrics for the end user with the goal of increasing visibility
and helping with debugging. However you may use the same API in your
jobs to publish metrics to the same metrics ``sink``.
A ``metric`` can be thought of as timeseries data
and is uniquely identified by the string-valued tuple
``(metric_group, metric_name)``.
torchelastic makes no assumptions about what a ``metric_group`` is
and what relationship it has with ``metric_name``. It is totally up
to the user to use these two fields to uniquely identify a metric.
.. note:: The metric group ``torchelastic`` is reserved by torchelastic for
platform level metrics that it produces.
For instance torchelastic may output the latency (in milliseconds)
of a re-rendezvous operation from the agent as
``(torchelastic, agent.rendezvous.duration.ms)``
A sensible way to use metric groups is to map them to a stage or module
in your job. You may also encode certain high level properties of
the job such as the region or stage (dev vs prod).
**Publish Metrics**:
Using torchelastic's metrics API is similar to using python's logging
framework. You first have to configure a metrics handler before
trying to add metric data.
The example below measures the latency for the ``calculate()`` function.
::
import time
import torch.distributed.elastic.metrics as metrics
# makes all metrics other than the ones from "my_module" go to /dev/null
metrics.configure(metrics.NullMetricsHandler())
metrics.configure(metrics.ConsoleMetricsHandler(), "my_module")
def my_method():
start = time.time()
calculate()
end = time.time()
metrics.put_metric("calculate_latency", int(end-start), "my_module")
You may also use the `torch.distributed.elastic.metrics.prof` decorator
to conveniently and succinctly profile functions
::
# -- in module examples.foobar --
import torch.distributed.elastic.metrics as metrics
metrics.configure(metrics.ConsoleMetricsHandler(), "foobar")
metrics.configure(metrics.ConsoleMetricsHandler(), "Bar")
@metrics.prof
def foo():
pass
class Bar():
@metrics.prof
def baz():
pass
``@metrics.prof`` will publish the following metrics
::
<leaf_module or classname>.success - 1 if the function finished successfully
<leaf_module or classname>.failure - 1 if the function threw an exception
<leaf_module or classname>.duration.ms - function duration in milliseconds
**Configuring Metrics Handler**:
`torch.distributed.elastic.metrics.MetricHandler` is responsible for emitting
the added metric values to a particular destination. Metric groups can be
configured with different metric handlers.
By default torchelastic emits all metrics to ``/dev/null``.
By adding the following configuration metrics,
``torchelastic`` and ``my_app`` metric groups will be printed out to
console.
::
import torch.distributed.elastic.metrics as metrics
metrics.configure(metrics.ConsoleMetricHandler(), group = "torchelastic")
metrics.configure(metrics.ConsoleMetricHandler(), group = "my_app")
**Writing a Custom Metric Handler**:
If you want your metrics to be emitted to a custom location, implement
the `torch.distributed.elastic.metrics.MetricHandler` interface
and configure your job to use your custom metric handler.
Below is a toy example that prints the metrics to ``stdout``
::
import torch.distributed.elastic.metrics as metrics
class StdoutMetricHandler(metrics.MetricHandler):
def emit(self, metric_data):
ts = metric_data.timestamp
group = metric_data.group_name
name = metric_data.name
value = metric_data.value
print(f"[{ts}][{group}]: {name}={value}")
metrics.configure(StdoutMetricHandler(), group="my_app")
Now all metrics in the group ``my_app`` will be printed to stdout as:
::
[1574213883.4182858][my_app]: my_metric=<value>
[1574213940.5237644][my_app]: my_metric=<value>
"""
from typing import Optional
from .api import ( # noqa: F401
ConsoleMetricHandler,
MetricData,
MetricHandler,
MetricsConfig,
NullMetricHandler,
configure,
get_elapsed_time_ms,
getStream,
prof,
profile,
publish_metric,
put_metric,
)
def initialize_metrics(cfg: Optional[MetricsConfig] = None):
pass
try:
from torch.distributed.elastic.metrics.static_init import * # type: ignore[import] # noqa: F401 F403
except ModuleNotFoundError:
pass
| pytorch-master | torch/distributed/elastic/metrics/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import time
import warnings
from collections import namedtuple
from functools import wraps
from typing import Dict, Optional
__all__ = ['MetricsConfig', 'MetricHandler', 'ConsoleMetricHandler', 'NullMetricHandler', 'MetricStream',
'configure', 'getStream', 'prof', 'profile', 'put_metric', 'publish_metric', 'get_elapsed_time_ms',
'MetricData']
MetricData = namedtuple("MetricData", ["timestamp", "group_name", "name", "value"])
class MetricsConfig:
__slots__ = ["params"]
def __init__(self, params: Optional[Dict[str, str]] = None):
self.params = params
if self.params is None:
self.params = {}
class MetricHandler(abc.ABC):
@abc.abstractmethod
def emit(self, metric_data: MetricData):
pass
class ConsoleMetricHandler(MetricHandler):
def emit(self, metric_data: MetricData):
print(
"[{}][{}]: {}={}".format(
metric_data.timestamp,
metric_data.group_name,
metric_data.name,
metric_data.value,
)
)
class NullMetricHandler(MetricHandler):
def emit(self, metric_data: MetricData):
pass
class MetricStream:
def __init__(self, group_name: str, handler: MetricHandler):
self.group_name = group_name
self.handler = handler
def add_value(self, metric_name: str, metric_value: int):
self.handler.emit(
MetricData(time.time(), self.group_name, metric_name, metric_value)
)
_metrics_map = {}
_default_metrics_handler: MetricHandler = NullMetricHandler()
# pyre-fixme[9]: group has type `str`; used as `None`.
def configure(handler: MetricHandler, group: str = None):
if group is None:
global _default_metrics_handler
# pyre-fixme[9]: _default_metrics_handler has type `NullMetricHandler`; used
# as `MetricHandler`.
_default_metrics_handler = handler
else:
_metrics_map[group] = handler
def getStream(group: str):
if group in _metrics_map:
handler = _metrics_map[group]
else:
handler = _default_metrics_handler
return MetricStream(group, handler)
def _get_metric_name(fn):
qualname = fn.__qualname__
split = qualname.split(".")
if len(split) == 1:
module = fn.__module__
if module:
return module.split(".")[-1] + "." + split[0]
else:
return split[0]
else:
return qualname
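# Illustrative mapping (module and names below are only examples): a free
# function ``foo`` defined in module ``examples.foobar`` yields the metric key
# "foobar.foo" (leaf module name + function name), while a method ``Bar.baz``
# yields its qualified name "Bar.baz".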
def prof(fn=None, group: str = "torchelastic"):
r"""
@prof decorator publishes duration.ms, success, and failure
metrics for the function that it decorates. The metric name defaults
to the qualified name (``class_name.def_name``) of the function.
If the function does not belong to a class, it uses the leaf module name
instead.
Usage
::
@metrics.prof
def x():
pass
@metrics.prof(group="agent")
def y():
pass
"""
def wrap(f):
@wraps(f)
def wrapper(*args, **kwargs):
key = _get_metric_name(f)
try:
start = time.time()
result = f(*args, **kwargs)
put_metric(f"{key}.success", 1, group)
except Exception:
put_metric(f"{key}.failure", 1, group)
raise
finally:
put_metric(f"{key}.duration.ms", get_elapsed_time_ms(start), group)
return result
return wrapper
if fn:
return wrap(fn)
else:
return wrap
def profile(group=None):
"""
@profile decorator adds latency and success/failure metrics to any given function.
Usage
::
@metrics.profile("my_metric_group")
def some_function(<arguments>):
"""
warnings.warn("Deprecated, use @prof instead", DeprecationWarning)
def wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
start_time = time.time()
result = func(*args, **kwargs)
publish_metric(group, "{}.success".format(func.__name__), 1)
except Exception:
publish_metric(group, "{}.failure".format(func.__name__), 1)
raise
finally:
publish_metric(
group,
"{}.duration.ms".format(func.__name__),
get_elapsed_time_ms(start_time),
)
return result
return wrapper
return wrap
def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchelastic"):
"""
Publishes a metric data point.
Usage
::
put_metric("metric_name", 1)
put_metric("metric_name", 1, "metric_group_name")
"""
getStream(metric_group).add_value(metric_name, metric_value)
def publish_metric(metric_group: str, metric_name: str, metric_value: int):
warnings.warn(
"Deprecated, use put_metric(metric_group)(metric_name, metric_value) instead"
)
metric_stream = getStream(metric_group)
metric_stream.add_value(metric_name, metric_value)
def get_elapsed_time_ms(start_time_in_seconds: float):
"""
Returns the elapsed time in millis from the given start time.
"""
end_time = time.time()
return int((end_time - start_time_in_seconds) * 1000)
| pytorch-master | torch/distributed/elastic/metrics/api.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import random
import time
from base64 import b64decode, b64encode
from typing import Optional
import etcd # type: ignore[import]
# pyre-ignore[21]: Could not find name `Store` in `torch.distributed`.
from torch.distributed import Store
# Delay (sleep) for a small random amount to reduce CAS failures.
# This does not affect correctness, but will reduce requests to etcd server.
def cas_delay():
time.sleep(random.uniform(0, 0.1))
# pyre-fixme[11]: Annotation `Store` is not defined as a type.
class EtcdStore(Store):
"""
Implements a c10 Store interface by piggybacking on the rendezvous etcd
instance. This is the store object returned by ``EtcdRendezvous``
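Example (a rough sketch; assumes a reachable etcd server on ``localhost:2379``,
the ``python-etcd`` client used by this module, and an illustrative key prefix)::
    import etcd
    client = etcd.Client(host="localhost", port=2379)
    store = EtcdStore(client, "/torchelastic/p2p/run_42/kv")
    store.set("rank_0_ready", "1")
    value = store.get("rank_0_ready")    # returns b'1'
    counter = store.add("num_ready", 1)  # atomic increment, returns the new value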
"""
def __init__(
self,
etcd_client,
etcd_store_prefix,
# Default timeout same as in c10d/Store.hpp
timeout: Optional[datetime.timedelta] = None,
):
super().__init__() # required for pybind trampoline.
self.client = etcd_client
self.prefix = etcd_store_prefix
if timeout is not None:
self.set_timeout(timeout)
if not self.prefix.endswith("/"):
self.prefix += "/"
def set(self, key, value):
"""
Write a key/value pair into ``EtcdStore``.
Both key and value may be either Python ``str`` or ``bytes``.
"""
self.client.set(key=self.prefix + self._encode(key), value=self._encode(value))
def get(self, key) -> bytes:
"""
Get a value by key, possibly doing a blocking wait.
If key is not immediately present, will do a blocking wait
for at most ``timeout`` duration or until the key is published.
Returns:
value ``(bytes)``
Raises:
LookupError - If key still not published after timeout
"""
b64_key = self.prefix + self._encode(key)
kvs = self._try_wait_get([b64_key])
if kvs is None:
raise LookupError(f"Key {key} not found in EtcdStore")
return self._decode(kvs[b64_key])
def add(self, key, num: int) -> int:
"""
Atomically increment a value by an integer amount. The integer is
represented as a string using base 10. If key is not present,
a default value of ``0`` will be assumed.
Returns:
the new (incremented) value
"""
b64_key = self._encode(key)
# c10d Store assumes value is an integer represented as a decimal string
try:
# Assume default value "0", if this key didn't exist yet:
node = self.client.write(
key=self.prefix + b64_key,
value=self._encode(str(num)), # i.e. 0 + num
prevExist=False,
)
return int(self._decode(node.value))
except etcd.EtcdAlreadyExist:
pass
while True:
# Note: c10d Store does not have a method to delete keys, so we
# can be sure it's still there.
node = self.client.get(key=self.prefix + b64_key)
new_value = self._encode(str(int(self._decode(node.value)) + num))
try:
node = self.client.test_and_set(
key=node.key, value=new_value, prev_value=node.value
)
return int(self._decode(node.value))
except etcd.EtcdCompareFailed:
cas_delay()
def wait(self, keys, override_timeout: Optional[datetime.timedelta] = None):
"""
Waits until all of the keys are published, or until timeout.
Raises:
LookupError - if timeout occurs
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(b64_keys, override_timeout)
if kvs is None:
raise LookupError("Timeout while waiting for keys in EtcdStore")
# No return value on success
def check(self, keys) -> bool:
"""
Check if all of the keys are immediately present (without waiting).
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(
b64_keys,
override_timeout=datetime.timedelta(microseconds=1), # as if no wait
)
return kvs is not None
#
# Encode key/value data in base64, so we can store arbitrary binary data
# in EtcdStore. Input can be `str` or `bytes`.
# In case of `str`, utf-8 encoding is assumed.
#
def _encode(self, value) -> str:
if type(value) == bytes:
return b64encode(value).decode()
elif type(value) == str:
return b64encode(value.encode()).decode()
raise ValueError("Value must be of type str or bytes")
#
# Decode a base64 string (of type `str` or `bytes`).
# Return type is `bytes`, which is more convenient with the Store interface.
#
def _decode(self, value) -> bytes:
if type(value) == bytes:
return b64decode(value)
elif type(value) == str:
return b64decode(value.encode())
raise ValueError("Value must be of type str or bytes")
#
# Get all of the (base64-encoded) etcd keys at once, or wait until all the keys
# are published or timeout occurs.
# This is a helper method for the public interface methods.
#
# On success, a dictionary of {etcd key -> etcd value} is returned.
# On timeout, None is returned.
#
def _try_wait_get(self, b64_keys, override_timeout=None):
timeout = self.timeout if override_timeout is None else override_timeout # type: ignore[attr-defined]
deadline = time.time() + timeout.total_seconds()
while True:
# Read whole directory (of keys), filter only the ones waited for
all_nodes = self.client.get(key=self.prefix)
req_nodes = {
node.key: node.value for node in all_nodes.children if node.key in b64_keys
}
if len(req_nodes) == len(b64_keys):
# All keys are available
return req_nodes
watch_timeout = deadline - time.time()
if watch_timeout <= 0:
return None
try:
self.client.watch(
key=self.prefix,
recursive=True,
timeout=watch_timeout,
index=all_nodes.etcd_index + 1,
)
except etcd.EtcdWatchTimedOut:
if time.time() >= deadline:
return None
else:
continue
except etcd.EtcdEventIndexCleared:
continue
| pytorch-master | torch/distributed/elastic/rendezvous/etcd_store.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler
__all__ = ['get_rendezvous_handler']
def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import static_tcp_rendezvous
return static_tcp_rendezvous.create_rdzv_handler(params)
def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import etcd_rendezvous
return etcd_rendezvous.create_rdzv_handler(params)
def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:
from .etcd_rendezvous_backend import create_backend
backend, store = create_backend(params)
return create_handler(store, backend, params)
def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler:
from .c10d_rendezvous_backend import create_backend
backend, store = create_backend(params)
return create_handler(store, backend, params)
def _register_default_handlers() -> None:
handler_registry.register("etcd", _create_etcd_handler)
handler_registry.register("etcd-v2", _create_etcd_v2_handler)
handler_registry.register("c10d", _create_c10d_handler)
handler_registry.register("static", _create_static_handler)
def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler:
"""
This method is used to obtain a reference to a :py:class:`RendezvousHandler`.
Custom rendezvous handlers can be registered by
::
from torch.distributed.elastic.rendezvous import rendezvous_handler_registry
from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler
def create_my_rdzv(params: RendezvousParameters):
return MyCustomRdzv(params)
rendezvous_handler_registry.register("my_rdzv_backend_name", create_my_rdzv)
my_rdzv_params = RendezvousParameters(backend="my_rdzv_backend_name", ...)
my_rdzv_handler = get_rendezvous_handler(my_rdzv_params)
"""
return handler_registry.create_handler(params)
| pytorch-master | torch/distributed/elastic/rendezvous/registry.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
In the context of Torch Distributed Elastic we use the term *rendezvous* to
refer to a particular functionality that combines a **distributed
synchronization** primitive with **peer discovery**.
It is used by Torch Distributed Elastic to gather participants of a training
job (i.e. nodes) such that they all agree on the same list of participants and
everyone's roles, as well as make a consistent collective decision on when
training can begin/resume.
Torch Distributed Elastic rendezvous provides the following critical
functionalities:
**Barrier**:
Nodes performing rendezvous will all block until the rendezvous is considered
complete - this happens when at least ``min`` total number of nodes have joined
the rendezvous barrier (for the same job). This also implies the barrier is not
necessarily of fixed size.
There's an additional small waiting time after reaching ``min`` number of
nodes - this is used to ensure the rendezvous is not completed "too quickly"
(which could potentially exclude additional nodes attempting to join at
approximately the same time).
If ``max`` number of nodes is gathered at the barrier, the rendezvous is
completed immediately.
There's also an overall timeout which causes the rendezvous to fail if ``min``
number of nodes is never reached - this is meant to be a simple fail-safe to
help release partially allocated job resources, in case there's a problem with
the resource manager, and is meant to be interpreted as non-retryable.
**Exclusivity**:
A simple distributed barrier would not be sufficient, as we also need to ensure
that only one group of nodes exists at any given time (for a given job). In
other words, new nodes (i.e. joining late) should not be able to form a parallel
independent group of workers for the same job.
Torch Distributed Elastic rendezvous ensures that if a group of nodes has
already completed a rendezvous (and hence might already be training), then
additional "late" nodes attempting to rendezvous will only announce themselves
as waiting, and will have to wait until the (previously completed) existing
rendezvous is destroyed first.
**Consistency**:
When a rendezvous is completed, all its members will agree on the job membership
and everyone's role in it. This role is represented using an integer, called
rank, that is between 0 and world size.
Note that ranks are *not stable*, in the sense that the same node can be
assigned a different rank in the next (re-)rendezvous.
**Fault-tolerance**:
Torch Distributed Elastic rendezvous is designed to tolerate node failures
during the rendezvous process. Should a process crash (or lose network
connectivity, etc), between joining the rendezvous and it being completed, then
a re-rendezvous with remaining healthy nodes will happen automatically.
A node can also fail *after* it has completed (or *has been observed* by other
nodes to have completed) the rendezvous - this scenario will be handled by the
Torch Distributed Elastic ``train_loop`` instead (where it will also trigger a
re-rendezvous).
**Shared key-value store**:
When the rendezvous is completed, a shared key-value store is created and
returned. This store implements a ``torch.distributed.Store`` API (see
`distributed communication docs
<https://pytorch.org/docs/stable/distributed.html>`__).
This store is only shared by the members of the completed rendezvous. It
is intended to be used by Torch Distributed Elastic to exchange information
necessary to initialize job control and data-planes.
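A hedged sketch of how the returned store is typically used (``rdzv_handler``
stands for any ``RendezvousHandler`` instance, for example the one constructed
in the ``DynamicRendezvousHandler`` example below; the key name and the address
are illustrative):
::
    store, rank, world_size = rdzv_handler.next_rendezvous()
    if rank == 0:
        store.set("master_addr", "10.0.0.1")
    master_addr = store.get("master_addr")  # blocks until rank 0 publishes it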
**Waiting workers and rendezvous closing**:
Torch Distributed Elastic rendezvous handler object provides additional
functionalities, which are technically not part of the rendezvous process:
1. Querying how many workers arrived late at the barrier, who can participate in
*next* rendezvous.
2. Setting the rendezvous *closed* to signal all nodes not to participate in
next rendezvous.
**DynamicRendezvousHandler**:
Torch Distributed Elastic comes with the :py:class:`.DynamicRendezvousHandler`
class that implements the rendezvous mechanism described above. It is a backend-
agnostic type that expects a particular :py:class:`.RendezvousBackend` instance
to be specified during construction.
Torch distributed users can either implement their own backend type or use one
of the following implementations that come with PyTorch:
- :py:class:`.C10dRendezvousBackend`: Uses a C10d store (by default
``TCPStore``) as the rendezvous backend. The main advantage of using a C10d
store is that it requires no 3rd-party dependency (such as etcd) to establish
a rendezvous.
- :py:class:`.EtcdRendezvousBackend`: Supersedes the legacy
:py:class:`.EtcdRendezvousHandler` class. Passing an
:py:class:`.EtcdRendezvousBackend` instance to
:py:class:`.DynamicRendezvousHandler` is functionally equivalent to
instantiating an :py:class:`.EtcdRendezvousHandler`.
::
store = TCPStore("localhost", 29400, is_master=True)  # port is illustrative; any free port works
backend = C10dRendezvousBackend(store, "my_run_id")
rdzv_handler = DynamicRendezvousHandler.from_backend(
run_id="my_run_id",
store=store,
backend=backend,
min_nodes=2,
max_nodes=4
)
"""
from .api import * # noqa: F403
from .registry import _register_default_handlers
_register_default_handlers()
__all__ = [
"RendezvousClosedError",
"RendezvousConnectionError",
"RendezvousError",
"RendezvousHandler",
"RendezvousHandlerCreator",
"RendezvousHandlerRegistry",
"RendezvousParameters",
"RendezvousStateError",
"RendezvousTimeoutError",
"rendezvous_handler_registry",
]
| pytorch-master | torch/distributed/elastic/rendezvous/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import sys
import threading
import time
from typing import Optional
import etcd # type: ignore[import]
from torch.distributed.elastic.rendezvous import (
RendezvousClosedError,
RendezvousError,
RendezvousHandler,
RendezvousParameters,
RendezvousTimeoutError,
)
from .utils import parse_rendezvous_endpoint
from .etcd_store import EtcdStore, cas_delay
_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
_log_handler = logging.StreamHandler(sys.stderr)
_log_handler.setFormatter(_log_fmt)
log = logging.getLogger(__name__)
log.propagate = False
log.setLevel(logging.INFO)
log.addHandler(_log_handler)
# Retryable failure exception means that we were too late to make
# a desired state transition (e.g. because of a race condition),
# and should now restart from the beginning.
# A small delay is recommended to avoid spamming Etcd.
class EtcdRendezvousRetryableFailure(Exception):
pass
# Similar to retryable failure, but the new state we observed suggests we
# can re-try immediately, i.e. without a need for "safety delay".
class EtcdRendezvousRetryImmediately(Exception):
pass
# Default timeout for the rendezvous.
_DEFAULT_TIMEOUT: int = 600 # 10 minutes
# Additional waiting time after reaching the minimum number of nodes
# in case the rendezvous is elastic (min != max).
_DEFAULT_LAST_CALL_TIMEOUT: int = 30 # 30 seconds
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within directory.
# Its only role is to clean-up rendezvous data from old runs (for the case when
# etcd server is persistent), and has no effect on correctness, but should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
class EtcdRendezvousHandler(RendezvousHandler):
"""
Implements a
:py:class:`torch.distributed.elastic.rendezvous.RendezvousHandler` interface
backed by
:py:class:`torch.distributed.elastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
``EtcdRendezvousHandler`` uses a URL to configure the type of rendezvous to
use and to pass implementation specific configurations to the rendezvous
module. The basic etcd rendezvous configuration URL looks like the following
::
etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers> # noqa: W605
-- example --
etcd://localhost:2379/1234?min_workers=1&max_workers=3
The URL above is interpreted as follows:
1. Use the rendezvous handler that is registered with the ``etcd``
scheme
2. The ``etcd`` endpoint to use is ``localhost:2379``
3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to
share a common etcd server for multiple jobs so long as the
``job_ids`` are guaranteed to be unique). Note that the job id can be
any string (e.g. does not need to be a number) as long as it is
unique.
4. ``min_workers=1`` and ``max_workers=3`` specifies a range for
membership size - Torch Distributed Elastic starts running the job as
long as the cluster size is greater than or equal to ``min_workers``
and admits up to ``max_workers`` into the cluster.
Below are a full list of the parameters that can be passed to etcd
rendezvous:
+--------------------------------------------+--------------------------+
| Parameter | Description |
+============================================+==========================+
| min_workers | minimum number of |
| | workers for the |
| | rendezvous to be valid |
+--------------------------------------------+--------------------------+
| max_workers | maximum number of |
| | workers to admit |
+--------------------------------------------+--------------------------+
| timeout | total timeout within |
| | which next_rendezvous is |
| | expected to succeed |
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
| | (βlast callβ) after min |
| | ("last call") after min |
| | been reached (defaults |
| | to 30s) |
+--------------------------------------------+--------------------------+
| etcd_prefix | path prefix (from etcd |
| | root), inside which all |
| | etcd nodes will be |
| | created (defaults to |
| | ``/torchelastic/p2p``) |
+--------------------------------------------+--------------------------+
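A rough sketch of constructing this handler through the rendezvous registry
(the endpoint, run id and worker counts below are placeholders; assumes an
etcd server is already running)::
    from torch.distributed.elastic.rendezvous import RendezvousParameters
    from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler
    params = RendezvousParameters(
        backend="etcd",
        endpoint="localhost:2379",
        run_id="1234",
        min_nodes=1,
        max_nodes=3,
    )
    rdzv_handler = get_rendezvous_handler(params)
    store, rank, world_size = rdzv_handler.next_rendezvous()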
"""
def __init__(self, rdzv_impl):
self._rdzv_impl = rdzv_impl
def __del__(self):
# TODO: look into using weakref here instead.
del self._rdzv_impl
def get_backend(self) -> str:
return "etcd"
def next_rendezvous(self):
rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier()
log.info("Creating EtcdStore as the c10d::Store implementation")
store = self._rdzv_impl.setup_kv_store(rdzv_version)
return store, rank, world_size
def is_closed(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
return state["status"] == "closed"
except etcd.EtcdKeyNotFound:
# No rendezvous state, so it cannot be closed.
return False
def set_closed(self):
self._rdzv_impl.set_closed()
def num_nodes_waiting(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
if state["status"] == "final":
return state["num_workers_waiting"]
except etcd.EtcdKeyNotFound:
pass
return 0
def get_run_id(self) -> str:
return self._rdzv_impl._run_id
def shutdown(self) -> bool:
try:
self.set_closed()
return True
except BaseException as e:
log.warning(f"Shutdown failed. Error occurred: {str(e)}")
return False
# TODO: we should probably handle a few additional errors,
# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are
# only relevant for multi-node Etcd ensemble. A simple retry would work,
# but is verbose to add everywhere. Consider wrapping the client calls
# into auto-retry for these errors?
#
class EtcdRendezvous(object):
"""
A rendezvous implementation that uses `etcd <https://etcd.io/>`__ as
the backend store.
"""
def __init__(
self,
client,
prefix,
run_id,
num_min_workers,
num_max_workers,
timeout,
last_call_timeout,
):
self.client = client
log.info("Etcd machines: " + str(self.client.machines))
self._prefix = prefix
self._run_id = run_id
self._num_min_workers = num_min_workers
self._num_max_workers = num_max_workers
self._timeout = timeout
self._last_call_timeout = last_call_timeout
# For cleaning up TTL refresher threads (for ephemeral keys)
self._lease_run_id_stop = None
self._lease_this_rank_stop = None
if not self._prefix.endswith("/"):
self._prefix += "/"
# Setup a permanent prefix dir, if didn't exist
if self._prefix != "/":
self.create_path_if_not_exists(self._prefix)
# Lease a "sub-root" node specific to this job instance (run_id)
self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL)
self._lease_run_id_stop = self.setup_lease_renewal(
self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL
)
# Subdir for all rendezvous work
self.create_path_if_not_exists(self.get_path("/rdzv"))
# Create a rendezvous version counter, if doesn't exist
try:
self.client.write(
key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False
)
except etcd.EtcdAlreadyExist:
pass
def __del__(self):
# TODO: look into using weakref here instead.
if self._lease_run_id_stop is not None:
self._lease_run_id_stop.set()
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
def rendezvous_barrier(self):
"""
Main entry point for next rendezvous.
This method is blocking until rendezvous succeeds or a timeout occurs.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousTimeoutError - timeout waiting for rendezvous
RendezvousClosedError - rendezvous is or was closed while waiting
RendezvousError - other persistent errors that
render the rendezvous non-retryable
"""
self._rendezvous_deadline = time.time() + self._timeout
while True:
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
log.info("Attempting to join next rendezvous")
try:
# Dis-own our lease in the previous rendezvous, if exists
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
return self.init_phase()
except EtcdRendezvousRetryImmediately:
# The type of failure suggests we can retry without delay
pass
except EtcdRendezvousRetryableFailure:
# In case of retryable failure, wait a small delay
# to avoid spamming etcd
time.sleep(1)
except RendezvousTimeoutError:
log.info("Rendezvous timeout occured in EtcdRendezvousHandler")
raise
except RendezvousClosedError:
log.info(
f"Rendezvous for run_id={self._run_id} was observed to be closed"
)
raise
except RendezvousError:
raise
except Exception as e:
# In case of a general exception, wait a small delay
# to avoid spamming etcd
# FIXME: there are a few things that fall under this like
# etcd.EtcdKeyNotFound, etc, which could be handled more explicitly.
log.info("Rendezvous attempt failed, will retry. Reason: " + str(e))
time.sleep(1)
def init_phase(self):
"""
Initially, the rendezvous state is expected to be one of:
1. empty (non-existent) - in this case we try to create a new one.
2. joinable - we try to join it.
3. final - we announce ourselves as waiting, and go into monitoring mode
Any other state is considered transitional, and will be retried after
a short delay.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousClosedError - current rendezvous was/is closed
EtcdRendezvousRetryableFailure - observed some intermediate
state, which is best handled by retrying later
"""
try:
active_version = self.try_create_rendezvous()
state = json.loads(active_version.value)
log.info("New rendezvous state created: " + str(state))
except etcd.EtcdAlreadyExist:
active_version, state = self.get_rdzv_state()
# Note: it is possible for above query to fail (etcd.EtcdKeyNotFound),
# but this is ok for us - just means we'll restart from beginning.
log.info("Observed existing rendezvous state: " + str(state))
if state["status"] == "closed":
raise RendezvousClosedError()
if state["status"] == "joinable":
return self.join_phase(state["version"])
if state["status"] == "final":
self.handle_existing_rendezvous(state["version"])
raise EtcdRendezvousRetryImmediately()
self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)
raise EtcdRendezvousRetryableFailure()
def join_phase(self, expected_version):
"""
We observed a rendezvous state in 'joinable' state, and attempt to join this
particular version, and then wait for all other peers to join.
"""
# Failure to join will propagate an exception, causing a re-entry.
active_version, this_rank = self.join_rendezvous(expected_version)
state = json.loads(active_version.value)
log.info(
"Joined rendezvous version {} as rank {}. Full state: {}".format(
state["version"], this_rank, state
)
)
# If this worker was first to reach num_min_workers requirement,
# and rendezvous is still joinable (therefore it is elastic),
# then this worker will be responsible for waiting out the "last call"
# timeout and closing (i.e. transitioning to 'frozen') the rendezvous
# afterwards.
# As a safety against a potential failure of this worker (during the
# last call timeout), the rendezvous state is made ephemeral
# when min_num_workers is reached.
if this_rank == self._num_min_workers - 1 and state["status"] == "joinable":
log.info("Rank {} is responsible for join last call.".format(this_rank))
last_call_deadline = time.time() + self._last_call_timeout
self.handle_join_last_call(expected_version, last_call_deadline)
log.info("Rank {} finished join last call.".format(this_rank))
# Wait for rendezvous state to be frozen, which means a fixed set of peers
log.info("Waiting for remaining peers.")
active_version = self.wait_for_peers(expected_version)
state = json.loads(active_version.value)
assert (
state["version"] == expected_version
), "Logic error: failed to observe version mismatch"
return self.confirm_phase(expected_version, this_rank)
def confirm_phase(self, expected_version, this_rank):
"""
Once the rendezvous state transitions from 'joinable' to 'frozen',
we have every participant confirm their membership and setup per-member
keep-alive TTL keys, and then wait for all other participants to confirm,
which would then successfully conclude this rendezvous.
"""
log.info("All peers arrived. Confirming membership.")
self.confirm_membership(expected_version, this_rank)
log.info("Waiting for confirmations from all peers.")
active_version = self.wait_for_final(expected_version)
state = json.loads(active_version.value)
log.info(
"Rendezvous version {} is complete. Final state: {}".format(
state["version"], state
)
)
# Rendezvous version number; our rank in it; world size
return state["version"], this_rank, len(state["participants"])
def handle_existing_rendezvous(self, expected_version):
"""
Handle the case when there's an existing (state 'final') rendezvous already
in place, and we have to announce ourselves waiting, and wait until
the next rendezvous opportunity.
"""
# If state is 'final' -> increment num_workers_waiting
# Then, observe state changes:
# 1. if it's no longer final -> bail out and re-try
# 2. if keep alives are missing, destroy it and bail out.
active_state = self.announce_self_waiting(expected_version)
log.info(
"Added self to waiting list. Rendezvous full state: {}".format(
active_state.value
)
)
self.wait_for_rendezvous_to_free(expected_version)
log.info("Previously existing rendezvous state changed. Will re-try joining.")
def try_create_rendezvous(self):
"""
Create new rendezvous state or raise an exception that indicates
an unexpected state (e.g. already exists)
Raises:
RendezvousError - on unexpected state
"""
# Initially active_version is ephemeral - this is to handle the
# possibility that we might fail to complete the setup transaction,
# i.e. the transition "setup" -> "joinable".
active_version = self.client.write(
key=self.get_path("/rdzv/active_version"),
value=json.dumps({"status": "setup"}),
prevExist=False,
ttl=CONST_ETCD_SETUP_TTL,
)
try:
version_counter = self.client.get(self.get_path("/rdzv/version_counter"))
version_counter.value = str(int(version_counter.value) + 1)
self.client.update(version_counter)
except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed):
raise RendezvousError(
"Unexpected state of EtcdRendezvousHandler, worker needs to die."
)
# Any failure below results in declaring a retryable rendezvous failure.
# The ephemeral /rdzv/active_version will expire and someone can then
# re-try the setup process.
# Create directory node for participant data
self.client.write(
key=self.get_path("/rdzv/v_{}".format(version_counter.value)),
value=None,
dir=True,
prevExist=False,
)
# Publish rendezvous version and signal it is ready-to-be-joined.
# If rendezvous was set closed just before this, a retry will happen,
# where the closed condition will be handled.
return self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(
{
"status": "joinable",
"version": version_counter.value,
"participants": [],
}
),
prev_value=active_version.value,
)
def join_rendezvous(self, expected_version):
"""
Helper method for the join phase.
"""
# Use compare-and-swap to add self to rendezvous state:
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "joinable":
raise EtcdRendezvousRetryableFailure(
"Rendezvous state became non-joinable before we could join. "
"Must join next one."
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
assert (
len(state["participants"]) < self._num_max_workers
), "Logic error: joinable rendezvous should always have space left"
this_rank = len(state["participants"])
state["participants"].append(this_rank)
# When reaching min workers, or changing state to frozen, we'll set
# the active_version node to be ephemeral.
set_ttl: Optional[int] = None
if len(state["participants"]) == self._num_max_workers:
state["status"] = "frozen"
state["keep_alives"] = []
set_ttl = CONST_ETCD_FROZEN_TTL
elif len(state["participants"]) >= self._num_min_workers:
set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL
try:
# Compare-and-swap.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=set_ttl,
)
# We succeeded joining.
return active_version, this_rank
except etcd.EtcdCompareFailed:
log.info("Join rendezvous CAS unsuccessful, retrying")
def wait_for_peers(self, expected_version):
"""
Helper method for the join phase.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Success, all peers arrived.
return active_version
elif state["status"] == "joinable" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def confirm_membership(self, expected_version, this_rank):
"""
Helper method for the confirm phase
"""
# Compare-and-swap loop
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "frozen":
raise EtcdRendezvousRetryImmediately(
"Rendezvous no longer frozen, before we confirmed. "
"Must join next one"
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
this_lease_key = self.get_path(
"/rdzv/v_{}/rank_{}".format(expected_version, this_rank)
)
self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL)
state["keep_alives"].append(this_lease_key)
if len(state["keep_alives"]) == len(state["participants"]):
# Everyone confirmed (this rank is last to do so)
state["status"] = "final"
state["num_workers_waiting"] = 0
finalize = True
else:
finalize = False
try:
# Compare-and-swap. If new state is still frozen, keep it ephemeral.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=None if finalize else CONST_ETCD_FROZEN_TTL,
)
self._lease_this_rank_stop = self.setup_lease_renewal(
this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Confirm membership CAS unsuccessful, retrying")
def wait_for_final(self, expected_version):
"""
Helper method for the confirm phase
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "final" and state["version"] == expected_version:
                # Success. This rendezvous is final, and we accept it.
return active_version
elif state["status"] == "frozen" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def announce_self_waiting(self, expected_version):
"""
Announce this worker is waiting (via num_workers_waiting counter) to join next
rendezvous, but only if state and version match.
"""
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "final" or state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately()
# Increment counter to signal an additional waiting worker.
state["num_workers_waiting"] += 1
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Announce self as waiting CAS unsuccessful, retrying")
def wait_for_rendezvous_to_free(self, expected_version):
"""
When there's an existing valid rendezvous in state 'final', we have to
wait until the next opportunity to join.
        Such an opportunity may come from:
1. rendezvous state changed by someone else, in which case we unblock and retry.
2. rendezvous becomes invalid because at least one member failed to renew their
leased keep_alive node. We detect this, and destroy the rendezvous.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] != "final" or state["version"] != expected_version:
return
# Check if current rendezvous state is valid, in the sense that all
# its members are alive (renewing their lease).
# If not, try destroy this rendezvous, so a new one can be created.
alive_members = self.client.get(
self.get_path("/rdzv/v_{version}".format(version=expected_version))
)
keep_alive_keys = [ch.key for ch in alive_members.children]
for key in state["keep_alives"]:
if key not in keep_alive_keys:
# This participant didn't renew their lease. We'll declare this
# rendezvous version as dead (but only if it hadn't changed)
log.info("Keep-alive key {} is not renewed.".format(key))
log.info(
"Rendevous version {} is incomplete. ".format(expected_version)
)
log.info("Attempting to destroy it.")
# Compare-and-delete operation. Throws if compare failed,
# which means rendezvous was already destroyed/re-created/closed,
# and we can try to re-enter the barrier.
self.client.delete(
key=self.get_path("/rdzv/active_version"),
prevValue=active_version.value,
)
log.info(
"Destroyed rendezvous version {} successfully.".format(
expected_version
)
)
# We can return (and retry) immediately
return
# Existing rendezvous seems valid, no reason to destroy it.
# We just have to wait until something changes and re-check.
try:
overall_timeout = (
max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
)
self.client.watch(
key=self.get_path("/rdzv"),
index=active_version.etcd_index + 1,
recursive=True,
timeout=overall_timeout,
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
active_version, state = self.get_rdzv_state()
def handle_join_last_call(self, expected_version, deadline):
"""
        After we reach the min number of workers, one particular worker takes on the
        responsibility of waiting an additional timeout before closing the join window.
        If the worker responsible for this fails, the rendezvous will be destroyed due
        to an expiring TTL, and the other participants will re-rendezvous.
Here we expect to see state <joinable, expected_version>
Exit gracefully if either:
1. state becomes <frozen, expected_version>
2. timeout happens (reaching deadline), in which case
           we try the transition to <frozen, expected_version>
Exit with exception otherwise.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Worker set became frozen before last-call timeout. This is possible
                # when num_max_workers is reached before the timeout.
return
if state["status"] != "joinable" or state["version"] != expected_version:
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
# If timeout occurred, attempt a state transition (joinable -> frozen)
if time.time() >= deadline:
state["status"] = "frozen"
state["keep_alives"] = []
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=CONST_ETCD_FROZEN_TTL,
)
# We successfully made this rendezvous frozen.
return
except etcd.EtcdCompareFailed:
log.info("Join last-call transition CAS unsuccessful. Will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
continue
# Timeout did not occur, so we must refresh TTL, and wait for
# further changes. Note: we only want TTL to be refreshed if
# state is still joinable, hence we use CAS for that here,
# even though we don't change any of the data.
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=active_version.value,
prev_value=active_version.value,
ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
)
# Minimize "oversleeping":
timeout = min(
CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
deadline - time.time() + 1.0, # Oversleeping by 1s is ok.
)
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1, timeout=timeout
)
except etcd.EtcdCompareFailed:
log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
def set_closed(self):
"""
Mark rendezvous 'closed' for current run_id, which is used to signal other
participants to not attempt to perform (re-)rendezvous. This is useful
when one of the workers decides the job is complete.
"""
while True:
active_version, state = self.get_rdzv_state()
if state["status"] == "closed":
# Already closed by someone else.
return
state["status"] = "closed"
try:
self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Set closed CAS unsuccessful, retrying")
cas_delay()
def get_rdzv_state(self):
active_version = self.client.get(key=self.get_path("/rdzv/active_version"))
return active_version, json.loads(active_version.value)
def try_wait_for_state_change(self, etcd_index, timeout=None):
        # Don't sleep past the overall deadline (by more than 1s)
overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
timeout = overall_timeout if timeout is None else min(timeout, overall_timeout)
try:
self.client.watch(
self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
# Unfortunately, we have to do another fetch in order to get last etcd_index.
return self.get_rdzv_state()
def get_path(self, path):
if not path.startswith("/"):
path = "/" + path
return "{prefix}run_{run_id}{path}".format(
prefix=self._prefix, run_id=self._run_id, path=path
)
def create_path_if_not_exists(self, full_path, ttl=None):
try:
self.client.write(
key=full_path, value=None, dir=True, prevExist=False, ttl=ttl
)
except etcd.EtcdAlreadyExist:
pass
def setup_lease_renewal(self, full_path, ttl):
# NOTE: For ephemeral key TTL renewal (~lease) to work correctly,
# make sure you don't call any long-blocking methods that do not
        # release the Python GIL! An example of this is calling a pybind11
# extension function that is blocking / long-running, but is not
# doing a scoped release of the GIL.
def lease_worker(client, path, ttl, stop_event):
while True:
try:
client.refresh(path, ttl=ttl)
except etcd.EtcdKeyNotFound:
break
except ConnectionRefusedError:
                    # This error usually occurs during tests when the server has already been
                    # terminated but the Python garbage collector has not yet invoked __del__.
break
if stop_event.wait(timeout=ttl / 2):
break
lease_stop_event = threading.Event()
lease_thread = threading.Thread(
target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event)
)
lease_thread.daemon = True
lease_thread.start()
return lease_stop_event
def store_extra_data(self, rdzv_version, key, value):
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
try:
        # If this is the first time we are storing anything:
extra_data = self.client.write(
key=node, value=json.dumps({key: value}), prevExist=False
)
return
except etcd.EtcdAlreadyExist:
pass
# CAS loop, to make sure we don't lose concurrent stores.
while True:
# We never delete extra_data. Failure here should be fatal, no special handling.
extra_data = self.client.get(node)
new_extra_data_value = json.loads(extra_data.value)
new_extra_data_value[key] = value
try:
extra_data = self.client.test_and_set(
key=node,
value=json.dumps(new_extra_data_value),
prev_value=extra_data.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Store extra_data CAS unsuccessful, retrying")
time.sleep(0.1)
def load_extra_data(self, rdzv_version, key, timeout=None):
# 'extra_data' node itself, and the directory it is located in:
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
node_dir = self.get_path("/rdzv/v_{}".format(rdzv_version))
# TODO: implement timeout
# https://github.com/pytorch/elastic/issues/12
while True:
# Combined wait for the node itself, and the key inside it.
root = self.client.get(node_dir)
# Find the extra_data node, if it exists
extra_data = [n for n in root.children if n.key == node]
assert len(extra_data) <= 1
# Node for extra_data exists, check the desired key inside it.
if len(extra_data) == 1:
extra_data_dict = json.loads(extra_data[0].value)
if key in extra_data_dict:
return extra_data_dict[key]
            # The 'extra_data' node doesn't exist, or the key isn't published yet.
# Wait for interesting events on the extra_data node and retry.
try:
self.client.watch(node, index=root.etcd_index + 1)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
def setup_kv_store(self, rdzv_version):
store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv")
self.create_path_if_not_exists(store_path)
return EtcdStore(etcd_client=self.client, etcd_store_prefix=store_path)
def _create_etcd_client(params: RendezvousParameters) -> etcd.Client:
"""
Creates a new ``etcd.Client`` from the specified ``RendezvousParameters``.
"""
hostname, port = parse_rendezvous_endpoint(params.endpoint, 2379)
# The communication protocol
protocol = params.config.get("protocol")
if protocol is None:
protocol = "http"
else:
if protocol != "http" and protocol != "https":
raise ValueError("The etcd protocol must be HTTP or HTTPS.")
# The SSL client certificate
ssl_cert = params.config.get("cert")
if ssl_cert is not None:
cert_key = params.config.get("key")
if cert_key is not None:
# The etcd client expects the certificate key as the second element
# of the `cert` tuple.
ssl_cert = (ssl_cert, cert_key)
# The root certificate
ca_cert = params.config.get("cacert")
return etcd.Client(
hostname,
port,
protocol=protocol,
cert=ssl_cert,
ca_cert=ca_cert,
allow_reconnect=True,
)
# Handler for torch.distributed "static" registration
def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler:
"""
Usage:
::
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8,
timeout=300,
last_call_timeout=30,
etcd_prefix="custom_prefix",
protocol="https",
cacert="/etc/kubernetes/certs/ca.crt",
cert="/etc/kubernetes/certs/client.crt",
key="/etc/kubernetes/certs/client.key")
# -- or --
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8)
        etcd_rdzv_handler = create_rdzv_handler(rdzv_params)
Where:
run_id - unique id for this training job instance,
min_nodes - min number of workers expected to join the rendezvous,
max_nodes - max number of workers allowed to join the rendezvous,
                    defaults to min_nodes if not specified.
timeout - total timeout within which next_rendezvous is expected to
succeed; a RendezvousTimeoutError is raised otherwise;
                  Default is 600 seconds (10 minutes).
last_call_timeout - additional wait amount ("last call") after
min number of workers has been reached.
Defaults to 30 seconds.
etcd_prefix - path prefix (from etcd root), inside which all
etcd nodes will be created.
Default is "/torchelastic/p2p".
protocol - http (default) or https to access etcd.
cacert - CA cert to access etcd, only makes sense with https.
cert - client cert to access etcd, only makes sense with https.
key - client key to access etcd, only makes sense with https.
"""
client = _create_etcd_client(params)
etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p")
rdzv = EtcdRendezvous(
client=client,
prefix=etcd_prefix,
run_id=params.run_id,
num_min_workers=params.min_nodes,
num_max_workers=params.max_nodes,
timeout=params.get_as_int("timeout", _DEFAULT_TIMEOUT),
last_call_timeout=params.get_as_int("last_call_timeout", _DEFAULT_LAST_CALL_TIMEOUT),
)
return EtcdRendezvousHandler(rdzv_impl=rdzv)
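# Illustrative sketch (not part of the original module): building the etcd
# handler from RendezvousParameters and running one rendezvous round. The
# endpoint and run_id values are hypothetical, and the handler is assumed to
# expose the next_rendezvous()/shutdown() API of the RendezvousHandler base.
def _example_etcd_rendezvous_usage():  # pragma: no cover
    rdzv_params = RendezvousParameters(
        backend="etcd",
        endpoint="localhost:2379",
        run_id="example_run",
        min_nodes=1,
        max_nodes=2,
    )
    handler = create_rdzv_handler(rdzv_params)
    try:
        store, rank, world_size = handler.next_rendezvous()
        log.info("Joined rendezvous: rank={}, world_size={}".format(rank, world_size))
    finally:
        handler.shutdown()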
| pytorch-master | torch/distributed/elastic/rendezvous/etcd_rendezvous.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import binascii
import logging
import os
import tempfile
from base64 import b64decode, b64encode
from datetime import timedelta
from typing import Any, Optional, Tuple, cast
from torch.distributed import FileStore, Store, TCPStore
from torch.distributed.elastic.events import (
NodeState,
construct_and_record_rdzv_event,
)
from .api import (
RendezvousConnectionError,
RendezvousError,
RendezvousParameters,
RendezvousStateError,
)
from .dynamic_rendezvous import RendezvousBackend, Token
from .utils import _matches_machine_hostname, parse_rendezvous_endpoint
log = logging.getLogger(__name__)
class C10dRendezvousBackend(RendezvousBackend):
"""Represents a C10d-backed rendezvous backend.
Args:
store:
The :py:class:`torch.distributed.Store` instance to use to
communicate with the C10d store.
run_id:
The run id of the rendezvous.
"""
# See the explanation in the __init__ method.
_NULL_SENTINEL = "Y2FuaW1hZGFt"
_store: Store
_key: str
def __init__(self, store: Store, run_id: str) -> None:
if not run_id:
raise ValueError("The run id must be a non-empty string.")
self._store = store
self._key = "torch.rendezvous." + run_id
# The read operation of a store blocks the caller until the specified
# key becomes available. This behavior makes it tricky to use a store
# as a regular key-value dictionary.
#
# As a workaround we initially set a sentinel value as the rendezvous
# state. Whenever this value gets returned we treat it as a None.
self._call_store("compare_set", self._key, "", self._NULL_SENTINEL)
@property
def name(self) -> str:
"""See base class."""
return "c10d"
def get_state(self) -> Optional[Tuple[bytes, Token]]:
"""See base class."""
base64_state: bytes = self._call_store("get", self._key)
return self._decode_state(base64_state)
def set_state(
self, state: bytes, token: Optional[Token] = None
) -> Optional[Tuple[bytes, Token, bool]]:
"""See base class."""
base64_state_str: str = b64encode(state).decode()
if token:
# Shortcut if we know for sure that the token is not valid.
if not isinstance(token, bytes):
result = self.get_state()
if result is not None:
tmp = *result, False
# Python 3.6 does not support tuple unpacking in return
# statements.
return tmp
return None
token = token.decode()
else:
token = self._NULL_SENTINEL
base64_state: bytes = self._call_store("compare_set", self._key, token, base64_state_str)
state_token_pair = self._decode_state(base64_state)
if state_token_pair is None:
return None
new_state, new_token = state_token_pair
# C10d Store's compare_set method does not offer an easy way to find out
# whether our write attempt was successful. As a brute-force solution we
# perform a bitwise comparison of our local state and the remote state.
return new_state, new_token, new_state == state
def _call_store(self, store_op: str, *args, **kwargs) -> Any:
try:
return getattr(self._store, store_op)(*args, **kwargs)
except (ValueError, RuntimeError, TimeoutError) as exc:
raise RendezvousConnectionError(
"The connection to the C10d store has failed. See inner exception for details."
) from exc
def _decode_state(self, base64_state: bytes) -> Optional[Tuple[bytes, Token]]:
if base64_state == self._NULL_SENTINEL.encode():
return None
try:
state = b64decode(base64_state)
except binascii.Error as exc:
raise RendezvousStateError(
"The state object is corrupt. See inner exception for details."
) from exc
return state, base64_state
def _create_tcp_store(params: RendezvousParameters) -> TCPStore:
host, port = parse_rendezvous_endpoint(params.endpoint, default_port=29400)
cfg_is_host = params.get_as_bool("is_host")
# If the user has explicitly specified whether our process should host the
    # store, respect it.
if cfg_is_host is not None:
is_host = cfg_is_host
# Otherwise try to determine whether we are the host based on our hostname
# and IP address.
else:
is_host = _matches_machine_hostname(host)
# The timeout
read_timeout = cast(int, params.get_as_int("read_timeout", 60))
if read_timeout <= 0:
raise ValueError("The read timeout must be a positive integer.")
# In specific cases we attempt to instantiate the store twice. For details
# see the explanation in the except clause below.
for is_server in [is_host, False]:
try:
store = TCPStore(
host, port, is_master=is_server, timeout=timedelta(seconds=read_timeout)
)
if is_server:
msg = f"Process {os.getpid()} hosts the TCP store for the C10d rendezvous backend."
construct_and_record_rdzv_event(
run_id=params.run_id, message=msg, node_state=NodeState.INIT
)
log.info(msg)
break
except (ValueError, RuntimeError, TimeoutError) as exc:
# If we heuristically inferred the value of is_host as True and our
# first attempt to instantiate the TCP store has failed, try it one
# more time with is_host set to False. As an edge case there can be
# more than one process that is part of the same rendezvous on this
# machine and only one of them will eventually host the store.
if not is_server or cfg_is_host is not None:
raise RendezvousConnectionError(
"The connection to the C10d store has failed. See inner exception for details."
) from exc
return store
def _create_file_store(params: RendezvousParameters) -> FileStore:
# If a user specifies an endpoint, we treat it as a path to a file.
if params.endpoint:
path = params.endpoint
else:
try:
# The temporary file is readable and writable only by the user of
# this process.
_, path = tempfile.mkstemp()
except OSError as exc:
raise RendezvousError(
"The file creation for C10d store has failed. See inner exception for details."
) from exc
try:
store = FileStore(path)
except (ValueError, RuntimeError) as exc:
raise RendezvousConnectionError(
"The connection to the C10d store has failed. See inner exception for details."
) from exc
return store
def create_backend(params: RendezvousParameters) -> Tuple[C10dRendezvousBackend, Store]:
"""Creates a new :py:class:`C10dRendezvousBackend` from the specified
parameters.
+--------------+-----------------------------------------------------------+
| Parameter | Description |
+==============+===========================================================+
| store_type | The type of the C10d store. The currently supported types |
| | are "tcp" and "file" which correspond to |
| | :py:class:`torch.distributed.TCPStore` and |
| | :py:class:`torch.distributed.FileStore`, respectively. |
| | Defaults to "tcp". |
+--------------+-----------------------------------------------------------+
| read_timeout | The read timeout, in seconds, for store operations. |
| | Defaults to 60 seconds. |
| | |
| | Note this only applies to |
| | :py:class:`torch.distributed.TCPStore`. It is not relevant|
| | to :py:class:`torch.distributed.FileStore` which does not |
| | take in timeout as a parameter. |
+--------------+-----------------------------------------------------------+
| is_host | A boolean value indicating whether this backend instance |
| | will host the C10d store. If not specified it will be |
| | inferred heuristically by matching the hostname or the IP |
| | address of this machine against the specified rendezvous |
| | endpoint. Defaults to ``None``. |
| | |
| | Note that this configuration option only applies to |
| | :py:class:`torch.distributed.TCPStore`. In normal |
| | circumstances you can safely skip it; the only time when |
| | it is needed is if its value cannot be correctly |
| | determined (e.g. the rendezvous endpoint has a CNAME as |
| | the hostname or does not match the FQDN of the machine). |
+--------------+-----------------------------------------------------------+
"""
# As of today we only support TCPStore and FileStore. Other store types do
# not have the required functionality (e.g. compare_set) yet.
store_type = params.get("store_type", "tcp").strip().lower()
store: Store
try:
if store_type == "file":
store = _create_file_store(params)
elif store_type == "tcp":
store = _create_tcp_store(params)
else:
raise ValueError("Invalid store type given. Currently only supports file and tcp.")
backend = C10dRendezvousBackend(store, params.run_id)
except Exception as e:
construct_and_record_rdzv_event(
message=f"{type(e).__name__}: {str(e)}",
run_id=params.run_id,
node_state=NodeState.FAILED,
)
raise
return backend, store
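# Illustrative sketch (not part of the original module): constructing the C10d
# backend and its companion store from RendezvousParameters. The endpoint and
# option values below are hypothetical placeholders.
def _example_create_c10d_backend() -> Tuple[C10dRendezvousBackend, Store]:  # pragma: no cover
    params = RendezvousParameters(
        backend="c10d",
        endpoint="localhost:29400",
        run_id="example_run",
        min_nodes=1,
        max_nodes=1,
        store_type="tcp",
        is_host=True,
    )
    return create_backend(params)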
| pytorch-master | torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Tuple
from torch.distributed import Store
class RendezvousError(Exception):
"""Represents the base type for rendezvous errors."""
class RendezvousClosedError(RendezvousError):
"""Raised when a rendezvous is closed."""
class RendezvousTimeoutError(RendezvousError):
"""Raised when a rendezvous did not complete on time."""
class RendezvousConnectionError(RendezvousError):
"""Raised when the connection to a rendezvous backend has failed."""
class RendezvousStateError(RendezvousError):
"""Raised when the state of a rendezvous is corrupt."""
class RendezvousHandler(ABC):
"""Main rendezvous interface.
Note:
Distributed Torch users normally **do not** need to implement their own
``RendezvousHandler``. An implementation based on C10d Store is already
provided, and is recommended for most users.
"""
@abstractmethod
def get_backend(self) -> str:
"""Returns the name of the rendezvous backend."""
@abstractmethod
def next_rendezvous(
self,
) -> Tuple[Store, int, int]:
"""Main entry-point into the rendezvous barrier.
Blocks until the rendezvous is complete and the current process is
included in the formed worker group, or a timeout occurs, or the
rendezvous was marked closed.
Returns:
A tuple of :py:class:`torch.distributed.Store`, ``rank``, and
``world size``.
Raises:
RendezvousClosedError:
The rendezvous is closed.
RendezvousConnectionError:
The connection to the rendezvous backend has failed.
RendezvousStateError:
The rendezvous state is corrupt.
RendezvousTimeoutError:
The rendezvous did not complete on time.
"""
@abstractmethod
def is_closed(self) -> bool:
"""Checks whether the rendezvous has been closed.
A closed rendezvous means all future attempts to re-rendezvous within
        the same job will fail.
``is_closed()`` and :py:meth:`set_closed` have semantics of eventual
propagation and should not be used for synchronization. The intention is
that if at least one node decides the job is finished, it will close the
rendezvous, and other nodes will soon observe this and stop running as
well.
"""
@abstractmethod
def set_closed(self):
"""Marks the rendezvous as closed."""
@abstractmethod
def num_nodes_waiting(self) -> int:
"""Returns the number of nodes who arrived late at the rendezvous
barrier, hence were not included in the current worker group.
Callers should periodically call this method to check whether new
nodes are waiting to join the job and if so admit them by calling
:py:meth:`next_rendezvous()` (re-rendezvous).
"""
@abstractmethod
def get_run_id(self) -> str:
"""Returns the run id of the rendezvous.
The run id is a user-defined id that uniquely identifies an instance of
a distributed application. It typically maps to a job id and is used to
allow nodes to join the correct distributed application.
"""
def shutdown(self) -> bool:
"""Closes all resources that were open for the rendezvous.
Example::
rdzv_handler = ...
try:
store, rank, world_size = rdzv_handler.next_rendezvous()
finally:
rdzv_handler.shutdown()
"""
class RendezvousParameters:
"""Holds the parameters to construct a :py:class:`RendezvousHandler`.
Args:
backend:
The name of the backend to use to handle the rendezvous.
endpoint:
The endpoint of the rendezvous, usually in form <hostname>[:<port>].
run_id:
The id of the rendezvous.
min_nodes:
The minimum number of nodes to admit to the rendezvous.
max_nodes:
The maximum number of nodes to admit to the rendezvous.
**kwargs:
Additional parameters for the specified backend.
"""
def __init__(
self,
backend: str,
endpoint: str,
run_id: str,
min_nodes: int,
max_nodes: int,
**kwargs,
):
if not backend:
raise ValueError("The rendezvous backend name must be a non-empty string.")
if min_nodes < 1:
raise ValueError(
f"The minimum number of rendezvous nodes ({min_nodes}) must be greater than zero."
)
if max_nodes < min_nodes:
raise ValueError(
f"The maximum number of rendezvous nodes ({max_nodes}) must be greater than or "
f"equal to the minimum number of rendezvous nodes ({min_nodes})."
)
self.backend = backend
self.endpoint = endpoint
self.run_id = run_id
self.min_nodes = min_nodes
self.max_nodes = max_nodes
self.config = kwargs
def get(self, key: str, default: Any = None) -> Any:
"""Returns the value for ``key`` if ``key`` exists, else ``default``."""
return self.config.get(key, default)
def get_as_bool(self, key: str, default: Optional[bool] = None) -> Optional[bool]:
"""Returns the value for ``key`` as a ``bool``."""
value = self.get(key, default)
if value is None or isinstance(value, bool):
return value
if isinstance(value, int):
if value == 1:
return True
if value == 0:
return False
elif isinstance(value, str):
if value.lower() in ["1", "true", "t", "yes", "y"]:
return True
if value.lower() in ["0", "false", "f", "no", "n"]:
return False
raise ValueError(
f"The rendezvous configuration option '{key}' does not represent a valid boolean value."
)
def get_as_int(self, key: str, default: Optional[int] = None) -> Optional[int]:
"""Returns the value for ``key`` as an ``int``."""
value = self.get(key, default)
if value is None:
return value
try:
return int(value)
except ValueError:
raise ValueError(
f"The rendezvous configuration option '{key}' does not represent a valid integer "
"value."
)
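# Illustrative sketch (not part of the original module): backend-specific
# options passed as keyword arguments land in ``config`` and can be read back
# with the typed accessors. The values shown are hypothetical.
def _example_read_rendezvous_config():  # pragma: no cover
    params = RendezvousParameters(
        backend="c10d",
        endpoint="localhost:29400",
        run_id="example_run",
        min_nodes=1,
        max_nodes=4,
        is_host="yes",
        read_timeout="120",
    )
    is_host = params.get_as_bool("is_host")  # -> True
    read_timeout = params.get_as_int("read_timeout")  # -> 120
    protocol = params.get("protocol", "http")  # -> "http" (default)
    return is_host, read_timeout, protocol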
RendezvousHandlerCreator = Callable[[RendezvousParameters], RendezvousHandler]
class RendezvousHandlerRegistry:
"""Represents a registry of :py:class:`RendezvousHandler` backends."""
_registry: Dict[str, RendezvousHandlerCreator]
def __init__(self) -> None:
self._registry = {}
def register(self, backend: str, creator: RendezvousHandlerCreator) -> None:
"""Registers a new rendezvous backend.
Args:
backend:
The name of the backend.
            creator:
The callback to invoke to construct the
:py:class:`RendezvousHandler`.
"""
if not backend:
raise ValueError("The rendezvous backend name must be a non-empty string.")
current_creator: Optional[RendezvousHandlerCreator]
try:
current_creator = self._registry[backend]
except KeyError:
current_creator = None
if current_creator is not None and current_creator != creator:
raise ValueError(
f"The rendezvous backend '{backend}' cannot be registered with '{creator}' as it "
f"is already registered with '{current_creator}'."
)
self._registry[backend] = creator
def create_handler(self, params: RendezvousParameters) -> RendezvousHandler:
"""Creates a new :py:class:`RendezvousHandler`."""
try:
creator = self._registry[params.backend]
except KeyError:
raise ValueError(
f"The rendezvous backend '{params.backend}' is not registered. Did you forget "
f"to call `{self.register.__name__}`?"
)
handler = creator(params)
# Do some sanity check.
if handler.get_backend() != params.backend:
raise RuntimeError(
f"The rendezvous backend '{handler.get_backend()}' does not match the requested "
f"backend '{params.backend}'."
)
return handler
# The default global registry instance used by launcher scripts to instantiate
# rendezvous handlers.
rendezvous_handler_registry = RendezvousHandlerRegistry()
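# Illustrative sketch (not part of the original module): registering a
# hypothetical backend with the default registry and creating a handler from
# it. ``creator`` is a placeholder callback returning a RendezvousHandler
# whose get_backend() reports "my-backend".
def _example_register_backend(creator: RendezvousHandlerCreator) -> RendezvousHandler:  # pragma: no cover
    rendezvous_handler_registry.register("my-backend", creator)
    params = RendezvousParameters(
        backend="my-backend",
        endpoint="localhost:29500",
        run_id="example_run",
        min_nodes=1,
        max_nodes=1,
    )
    return rendezvous_handler_registry.create_handler(params)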
| pytorch-master | torch/distributed/elastic/rendezvous/api.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import binascii
from base64 import b64decode, b64encode
from typing import Optional, Tuple, cast
import urllib3.exceptions # type: ignore[import]
from etcd import Client as EtcdClient # type: ignore[import]
from etcd import (
EtcdAlreadyExist,
EtcdCompareFailed,
EtcdException,
EtcdKeyNotFound,
EtcdResult,
)
from torch.distributed import Store
from .api import RendezvousConnectionError, RendezvousParameters, RendezvousStateError
from .dynamic_rendezvous import RendezvousBackend, Token
from .etcd_store import EtcdStore
from .utils import parse_rendezvous_endpoint
class EtcdRendezvousBackend(RendezvousBackend):
"""Represents an etcd-based rendezvous backend.
Args:
client:
The ``etcd.Client`` instance to use to communicate with etcd.
run_id:
The run id of the rendezvous.
key_prefix:
The path under which to store the rendezvous state in etcd.
ttl:
The TTL of the rendezvous state. If not specified, defaults to two hours.
"""
_DEFAULT_TTL = 7200 # 2 hours
_client: EtcdClient
_key: str
_ttl: int
def __init__(
self,
client: EtcdClient,
run_id: str,
key_prefix: Optional[str] = None,
ttl: Optional[int] = None,
) -> None:
if not run_id:
raise ValueError("The run id must be a non-empty string.")
self._client = client
if key_prefix:
self._key = key_prefix + "/" + run_id
else:
self._key = run_id
if ttl and ttl > 0:
self._ttl = ttl
else:
self._ttl = self._DEFAULT_TTL
@property
def name(self) -> str:
"""See base class."""
return "etcd-v2"
def get_state(self) -> Optional[Tuple[bytes, Token]]:
"""See base class."""
try:
result = self._client.read(self._key)
except EtcdKeyNotFound:
return None
except (EtcdException, urllib3.exceptions.TimeoutError) as exc:
raise RendezvousConnectionError(
"The connection to etcd has failed. See inner exception for details."
) from exc
return self._decode_state(result)
def set_state(
self, state: bytes, token: Optional[Token] = None
) -> Optional[Tuple[bytes, Token, bool]]:
"""See base class."""
base64_state = b64encode(state).decode()
kwargs = {}
def get_state():
result = self.get_state()
if result is not None:
tmp = *result, False
# Python 3.6 does not support tuple unpacking in return
# statements.
return tmp
return None
if token:
try:
token = int(token)
except ValueError:
return get_state()
if token:
kwargs["prevIndex"] = token
else:
kwargs["prevExist"] = False
try:
result = self._client.write(self._key, base64_state, self._ttl, **kwargs)
except (EtcdAlreadyExist, EtcdCompareFailed):
result = None
except (EtcdException, urllib3.exceptions.TimeoutError) as exc:
raise RendezvousConnectionError(
"The connection to etcd has failed. See inner exception for details."
) from exc
if result is None:
return get_state()
tmp = *self._decode_state(result), True
return tmp
def _decode_state(self, result: EtcdResult) -> Tuple[bytes, Token]:
base64_state = result.value.encode()
try:
state = b64decode(base64_state)
except binascii.Error as exc:
raise RendezvousStateError(
"The state object is corrupt. See inner exception for details."
) from exc
return state, result.modifiedIndex
def _create_etcd_client(params: RendezvousParameters) -> EtcdClient:
host, port = parse_rendezvous_endpoint(params.endpoint, default_port=2379)
# The timeout
read_timeout = cast(int, params.get_as_int("read_timeout", 60))
if read_timeout <= 0:
raise ValueError("The read timeout must be a positive integer.")
# The communication protocol
protocol = params.get("protocol", "http").strip().lower()
if protocol != "http" and protocol != "https":
raise ValueError("The protocol must be HTTP or HTTPS.")
# The SSL client certificate
ssl_cert = params.get("ssl_cert")
if ssl_cert:
ssl_cert_key = params.get("ssl_cert_key")
if ssl_cert_key:
# The etcd client expects the certificate key as the second element
# of the `cert` tuple.
ssl_cert = (ssl_cert, ssl_cert_key)
# The root certificate
ca_cert = params.get("ca_cert")
try:
return EtcdClient(
host,
port,
read_timeout=read_timeout,
protocol=protocol,
cert=ssl_cert,
ca_cert=ca_cert,
allow_reconnect=True,
)
except (EtcdException, urllib3.exceptions.TimeoutError) as exc:
raise RendezvousConnectionError(
"The connection to etcd has failed. See inner exception for details."
) from exc
def create_backend(params: RendezvousParameters) -> Tuple[EtcdRendezvousBackend, Store]:
"""Creates a new :py:class:`EtcdRendezvousBackend` from the specified
parameters.
+--------------+-----------------------------------------------------------+
| Parameter | Description |
+==============+===========================================================+
| read_timeout | The read timeout, in seconds, for etcd operations. |
| | Defaults to 60 seconds. |
+--------------+-----------------------------------------------------------+
| protocol | The protocol to use to communicate with etcd. Valid |
| | values are "http" and "https". Defaults to "http". |
+--------------+-----------------------------------------------------------+
| ssl_cert | The path to the SSL client certificate to use along with |
| | HTTPS. Defaults to ``None``. |
+--------------+-----------------------------------------------------------+
| ssl_cert_key | The path to the private key of the SSL client certificate |
| | to use along with HTTPS. Defaults to ``None``. |
+--------------+-----------------------------------------------------------+
    | ca_cert      | The path to the root SSL authority certificate. Defaults |
| | to ``None``. |
+--------------+-----------------------------------------------------------+
"""
client = _create_etcd_client(params)
backend = EtcdRendezvousBackend(client, params.run_id, key_prefix="/torch/elastic/rendezvous")
store = EtcdStore(client, "/torch/elastic/store")
return backend, store
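# Illustrative sketch (not part of the original module): creating the etcd-v2
# backend and its companion store from RendezvousParameters. The endpoint and
# TLS paths below are hypothetical placeholders.
def _example_create_etcd_backend() -> Tuple[EtcdRendezvousBackend, Store]:  # pragma: no cover
    params = RendezvousParameters(
        backend="etcd-v2",
        endpoint="etcd.example.com:2379",
        run_id="example_run",
        min_nodes=1,
        max_nodes=2,
        protocol="https",
        ssl_cert="/path/to/client.crt",
        ssl_cert_key="/path/to/client.key",
        ca_cert="/path/to/ca.crt",
    )
    return create_backend(params)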
| pytorch-master | torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ipaddress
import random
import re
import socket
import time
import weakref
from datetime import timedelta
from threading import Event, Thread
from typing import Any, Callable, Dict, Optional, Tuple, Union
__all__ = ['parse_rendezvous_endpoint']
def _parse_rendezvous_config(config_str: str) -> Dict[str, str]:
"""Extracts key-value pairs from a rendezvous configuration string.
Args:
config_str:
A string in format <key1>=<value1>,...,<keyN>=<valueN>.
"""
config: Dict[str, str] = {}
config_str = config_str.strip()
if not config_str:
return config
key_values = config_str.split(",")
for kv in key_values:
key, *values = kv.split("=", 1)
key = key.strip()
if not key:
raise ValueError(
"The rendezvous configuration string must be in format "
"<key1>=<value1>,...,<keyN>=<valueN>."
)
value: Optional[str]
if values:
value = values[0].strip()
else:
value = None
if not value:
raise ValueError(
f"The rendezvous configuration option '{key}' must have a value specified."
)
config[key] = value
return config
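# Illustrative sketch (not part of the original module): the parser turns a
# comma-separated option string into a plain dict of strings. The option
# names and values below are hypothetical.
def _example_parse_rendezvous_config():  # pragma: no cover
    config = _parse_rendezvous_config("protocol=https,read_timeout=120")
    assert config == {"protocol": "https", "read_timeout": "120"}
    return config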
def _try_parse_port(port_str: str) -> Optional[int]:
"""Tries to extract the port number from ``port_str``."""
if port_str and re.match(r"^[0-9]{1,5}$", port_str):
return int(port_str)
return None
def parse_rendezvous_endpoint(endpoint: Optional[str], default_port: int) -> Tuple[str, int]:
"""Extracts the hostname and the port number from a rendezvous endpoint.
Args:
endpoint:
A string in format <hostname>[:<port>].
default_port:
The port number to use if the endpoint does not include one.
Returns:
A tuple of hostname and port number.
"""
if endpoint is not None:
endpoint = endpoint.strip()
if not endpoint:
return ("localhost", default_port)
# An endpoint that starts and ends with brackets represents an IPv6 address.
if endpoint[0] == "[" and endpoint[-1] == "]":
host, *rest = endpoint, *[]
else:
host, *rest = endpoint.rsplit(":", 1)
# Sanitize the IPv6 address.
if len(host) > 1 and host[0] == "[" and host[-1] == "]":
host = host[1:-1]
if len(rest) == 1:
port = _try_parse_port(rest[0])
if port is None or port >= 2 ** 16:
raise ValueError(
f"The port number of the rendezvous endpoint '{endpoint}' must be an integer "
"between 0 and 65536."
)
else:
port = default_port
if not re.match(r"^[\w\.:-]+$", host):
raise ValueError(
f"The hostname of the rendezvous endpoint '{endpoint}' must be a dot-separated list of "
"labels, an IPv4 address, or an IPv6 address."
)
return host, port
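# Illustrative sketch (not part of the original module): a few representative
# endpoint forms and the (host, port) pairs they parse into. The hostnames are
# hypothetical.
def _example_parse_rendezvous_endpoint():  # pragma: no cover
    assert parse_rendezvous_endpoint(None, default_port=29400) == ("localhost", 29400)
    assert parse_rendezvous_endpoint("node0:2379", default_port=29400) == ("node0", 2379)
    assert parse_rendezvous_endpoint("[::1]:2379", default_port=29400) == ("::1", 2379)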
def _matches_machine_hostname(host: str) -> bool:
"""Indicates whether ``host`` matches the hostname of this machine.
This function compares ``host`` to the hostname as well as to the IP
addresses of this machine. Note that it may return a false negative if this
machine has CNAME records beyond its FQDN or IP addresses assigned to
secondary NICs.
"""
if host == "localhost":
return True
try:
addr = ipaddress.ip_address(host)
except ValueError:
addr = None
if addr and addr.is_loopback:
return True
this_host = socket.gethostname()
if host == this_host:
return True
addr_list = socket.getaddrinfo(
this_host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME
)
for addr_info in addr_list:
# If we have an FQDN in the addr_info, compare it to `host`.
if addr_info[3] and addr_info[3] == host:
return True
# Otherwise if `host` represents an IP address, compare it to our IP
# address.
if addr and addr_info[4][0] == str(addr):
return True
return False
def _delay(seconds: Union[float, Tuple[float, float]]) -> None:
"""Suspends the current thread for ``seconds``.
Args:
seconds:
Either the delay, in seconds, or a tuple of a lower and an upper
bound within which a random delay will be picked.
"""
if isinstance(seconds, tuple):
seconds = random.uniform(*seconds)
# Ignore delay requests that are less than 10 milliseconds.
if seconds >= 0.01:
time.sleep(seconds)
class _PeriodicTimer:
"""Represents a timer that periodically runs a specified function.
Args:
interval:
The interval, in seconds, between each run.
function:
The function to run.
"""
    # The state of the timer is held in a separate context object to avoid a
# reference cycle between the timer and the background thread.
class _Context:
interval: float
function: Callable[..., None]
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
stop_event: Event
_name: Optional[str]
_thread: Optional[Thread]
_finalizer: Optional[weakref.finalize]
# The context that is shared between the timer and the background thread.
_ctx: _Context
def __init__(
self,
interval: timedelta,
function: Callable[..., None],
*args: Any,
**kwargs: Any,
) -> None:
self._name = None
self._ctx = self._Context()
self._ctx.interval = interval.total_seconds()
self._ctx.function = function # type: ignore[assignment]
self._ctx.args = args or ()
self._ctx.kwargs = kwargs or {}
self._ctx.stop_event = Event()
self._thread = None
self._finalizer = None
@property
def name(self) -> Optional[str]:
"""Gets the name of the timer."""
return self._name
def set_name(self, name: str) -> None:
"""Sets the name of the timer.
        The specified name will be assigned to the background thread and is used
        for debugging and troubleshooting purposes.
"""
if self._thread:
raise RuntimeError("The timer has already started.")
self._name = name
def start(self) -> None:
"""Start the timer."""
if self._thread:
raise RuntimeError("The timer has already started.")
self._thread = Thread(
target=self._run, name=self._name or "PeriodicTimer", args=(self._ctx,), daemon=True
)
# We avoid using a regular finalizer (a.k.a. __del__) for stopping the
# timer as joining a daemon thread during the interpreter shutdown can
# cause deadlocks. The weakref.finalize is a superior alternative that
# provides a consistent behavior regardless of the GC implementation.
self._finalizer = weakref.finalize(
self, self._stop_thread, self._thread, self._ctx.stop_event
)
# We do not attempt to stop our background thread during the interpreter
# shutdown. At that point we do not even know whether it still exists.
self._finalizer.atexit = False
self._thread.start()
def cancel(self) -> None:
"""Stop the timer at the next opportunity."""
if self._finalizer:
self._finalizer()
@staticmethod
def _run(ctx) -> None:
while not ctx.stop_event.wait(ctx.interval):
ctx.function(*ctx.args, **ctx.kwargs)
@staticmethod
def _stop_thread(thread, stop_event):
stop_event.set()
thread.join()
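# Illustrative sketch (not part of the original module): driving a background
# heartbeat with ``_PeriodicTimer``. The callback, message, and interval are
# hypothetical.
def _example_periodic_timer() -> None:  # pragma: no cover
    def _heartbeat(message: str) -> None:
        print(message)
    timer = _PeriodicTimer(timedelta(seconds=5), _heartbeat, "still alive")
    timer.set_name("ExampleKeepAliveTimer")
    timer.start()
    try:
        time.sleep(12)  # Let the callback fire a couple of times.
    finally:
        timer.cancel()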
| pytorch-master | torch/distributed/elastic/rendezvous/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import logging
import os
import pickle
import socket
import threading
import time
import weakref
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, cast
from torch.distributed import PrefixStore, Store
from torch.distributed.elastic.events import (
NodeState,
construct_and_record_rdzv_event,
)
from .api import (
RendezvousClosedError,
RendezvousError,
RendezvousHandler,
RendezvousParameters,
RendezvousStateError,
RendezvousTimeoutError,
)
from .utils import _delay, _PeriodicTimer
__all__ = ['RendezvousBackend', 'RendezvousTimeout', 'RendezvousSettings', 'DynamicRendezvousHandler', 'create_handler']
log = logging.getLogger(__name__)
def get_method_name(depth=2):
if len(inspect.stack()) > depth:
return inspect.stack()[depth].function
return "no_method_name"
Token = Any
"""Represents an opaque fencing token used by the rendezvous backend."""
class RendezvousBackend(ABC):
"""Represents a backend that holds the rendezvous state."""
@property
@abstractmethod
def name(self) -> str:
"""Gets the name of the backend."""
@abstractmethod
def get_state(self) -> Optional[Tuple[bytes, Token]]:
"""Gets the rendezvous state.
Returns:
A tuple of the encoded rendezvous state and its fencing token or
``None`` if no state is found in the backend.
Raises:
RendezvousConnectionError:
The connection to the backend has failed.
RendezvousStateError:
The rendezvous state is corrupt.
"""
@abstractmethod
def set_state(
self, state: bytes, token: Optional[Token] = None
) -> Optional[Tuple[bytes, Token, bool]]:
"""Sets the rendezvous state.
The new rendezvous state is set conditionally:
- If the specified ``token`` matches the fencing token stored in the
backend, the state will be updated. The new state will be returned
to the caller along with its fencing token.
- If the specified ``token`` does not match the fencing token stored
in the backend, the state won't be updated; instead the existing
state along with its fencing token will be returned to the caller.
- If the specified ``token`` is ``None``, the new state will be set
only if there is no existing state in the backend. Either the new
state or the existing state along with its fencing token will be
returned to the caller.
Args:
state:
The encoded rendezvous state.
token:
An optional fencing token that was retrieved by a previous call
to :py:meth:`get_state` or ``set_state()``.
Returns:
A tuple of the serialized rendezvous state, its fencing token, and
a boolean value indicating whether our set attempt succeeded.
Raises:
RendezvousConnectionError:
The connection to the backend has failed.
RendezvousStateError:
The rendezvous state is corrupt.
"""
class RendezvousTimeout:
"""Holds the timeout configuration of a rendezvous.
Args:
join:
The time within which the rendezvous is expected to complete.
last_call:
An additional wait amount before completing the rendezvous once the
rendezvous has the minimum number of required participants.
close:
The time within which the rendezvous is expected to close after a
call to :py:meth:`RendezvousHandler.set_closed` or
:py:meth:`RendezvousHandler.shutdown`.
keep_alive:
The time within which a keep-alive heartbeat is expected to
complete.
"""
_ZERO = timedelta(0)
_DEFAULT_TIMEOUTS = {
"join": timedelta(seconds=600),
"last_call": timedelta(seconds=30),
"close": timedelta(seconds=30),
"heartbeat": timedelta(seconds=5),
}
_join: timedelta
_last_call: timedelta
_close: timedelta
_heartbeat: timedelta
def __init__(
self,
join: Optional[timedelta] = None,
last_call: Optional[timedelta] = None,
close: Optional[timedelta] = None,
heartbeat: Optional[timedelta] = None,
) -> None:
self._set_timeouts(join=join, last_call=last_call, close=close, heartbeat=heartbeat)
@property
def join(self) -> timedelta:
"""Gets the join timeout."""
return self._join
@property
def last_call(self) -> timedelta:
"""Gets the last call timeout."""
return self._last_call
@property
def close(self) -> timedelta:
"""Gets the close timeout."""
return self._close
@property
def heartbeat(self) -> timedelta:
"""Gets the keep-alive heartbeat timeout."""
return self._heartbeat
def _set_timeouts(self, **timeouts: Optional[timedelta]):
for name, timeout in timeouts.items():
if timeout is None:
timeout = self._DEFAULT_TIMEOUTS[name]
if timeout <= self._ZERO:
raise ValueError(f"The {name} timeout ({timeout}) must be positive.")
setattr(self, "_" + name, timeout)
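# Illustrative sketch (not part of the original module): overriding only the
# join window while keeping the documented defaults for the other timeouts.
def _example_rendezvous_timeout() -> RendezvousTimeout:  # pragma: no cover
    timeout = RendezvousTimeout(join=timedelta(minutes=5))
    assert timeout.last_call == timedelta(seconds=30)  # documented default
    assert timeout.heartbeat == timedelta(seconds=5)  # documented default
    return timeout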
@dataclass(repr=False, eq=False, frozen=True)
class RendezvousSettings:
"""Holds the settings of the rendezvous.
Attributes:
run_id:
The run id of the rendezvous.
min_nodes:
The minimum number of nodes to admit to the rendezvous.
max_nodes:
The maximum number of nodes to admit to the rendezvous.
timeout:
The timeout configuration of the rendezvous.
keep_alive_interval:
The amount of time a node waits before sending a heartbeat to keep
it alive in the rendezvous.
keep_alive_max_attempt:
The maximum number of failed heartbeat attempts after which a node
is considered dead.
"""
run_id: str
min_nodes: int
max_nodes: int
timeout: RendezvousTimeout
keep_alive_interval: timedelta
keep_alive_max_attempt: int
@dataclass(eq=True, order=True, frozen=True)
class _NodeDesc:
"""Describes a node in the rendezvous.
Attributes:
fqdn:
The FQDN of the node.
pid:
The id of the process in which the rendezvous handler runs.
local_id:
A process-wide unique id.
"""
fqdn: str
pid: int
local_id: int
def __repr__(self) -> str:
return f"{self.fqdn}_{self.pid}_{self.local_id}"
class _NodeDescGenerator:
"""Generates node descriptors.
A node descriptor is a combination of an FQDN, a process id, and an auto-
incremented integer that uniquely identifies a node in the rendezvous.
"""
_lock: threading.Lock
_local_id: int
def __init__(self) -> None:
self._lock = threading.Lock()
# An integer that is incremented with each call to generate().
self._local_id = 0
def generate(self) -> _NodeDesc:
# This method can be called by multiple threads concurrently; therefore,
# we must increment the integer atomically.
with self._lock:
local_id = self._local_id
self._local_id += 1
return _NodeDesc(socket.getfqdn(), os.getpid(), local_id)
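# Illustrative sketch (not part of the original module): each call to
# generate() yields a descriptor that is unique within this process, printing
# as e.g. "myhost.example.com_4242_0" (hostname and pid are hypothetical).
def _example_generate_node_desc() -> _NodeDesc:  # pragma: no cover
    generator = _NodeDescGenerator()
    return generator.generate()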
class _RendezvousState:
"""Holds the state of a rendezvous.
Attributes:
round:
The current round of the rendezvous.
complete:
A boolean value indicating whether the current round of the
rendezvous is complete.
deadline:
The time at which the current round of the rendezvous will be
considered complete if it is still waiting for nodes to join.
closed:
A boolean value indicating whether the rendezvous is closed.
participants:
A dictionary of the participants and their corresponding ranks.
wait_list:
A set of nodes that are waiting to participate in the next round of
the rendezvous.
last_heartbeats:
A dictionary containing each node's last heartbeat time.
"""
round: int
complete: bool
deadline: Optional[datetime]
closed: bool
participants: Dict[_NodeDesc, int]
wait_list: Set[_NodeDesc]
last_heartbeats: Dict[_NodeDesc, datetime]
def __init__(self) -> None:
self.round = 0
self.complete = False
self.deadline = None
self.closed = False
self.participants = {}
self.wait_list = set()
self.last_heartbeats = {}
def _remove_participant_epilogue(state: _RendezvousState, settings: RendezvousSettings) -> None:
if state.complete:
# If we do not have any participants left, move to the next round.
if not state.participants:
state.complete = False
state.round += 1
else:
if len(state.participants) < settings.min_nodes:
state.deadline = None
class _RendezvousStateHolder(ABC):
"""Holds the shared rendezvous state synced with other nodes."""
@property
@abstractmethod
def state(self) -> _RendezvousState:
"""Gets the local state."""
@abstractmethod
def sync(self) -> Optional[bool]:
"""Reads or writes the latest state.
Returns:
A boolean value indicating whether the local state, in case marked
as dirty, was successfully synced with other nodes.
"""
@abstractmethod
def mark_dirty(self) -> None:
"""Marks the local state as dirty."""
class _BackendRendezvousStateHolder(_RendezvousStateHolder):
"""Holds the rendezvous state synced with other nodes via a backend.
Args:
backend:
The rendezvous backend to use.
settings:
The rendezvous settings.
cache_duration:
The amount of time, in seconds, to cache the last rendezvous state
before requesting it from the backend again.
"""
_backend: RendezvousBackend
_state: _RendezvousState
_settings: RendezvousSettings
_cache_duration: int
_token: Token
_dirty: bool
_last_sync_time: float
_dead_nodes: List[_NodeDesc]
def __init__(
self,
backend: RendezvousBackend,
settings: RendezvousSettings,
cache_duration: int = 1,
) -> None:
self._backend = backend
self._state = _RendezvousState()
self._settings = settings
self._cache_duration = cache_duration
self._token = None
self._dirty = False
self._last_sync_time = -1
self._dead_nodes = []
def _record(self, message: str, node_state: NodeState = NodeState.RUNNING):
construct_and_record_rdzv_event(
name=f"{self.__class__.__name__}.{get_method_name()}",
run_id=self._settings.run_id,
message=message,
node_state=node_state,
)
@property
def state(self) -> _RendezvousState:
"""See base class."""
return self._state
def sync(self) -> Optional[bool]:
"""See base class."""
state_bits: Optional[bytes] = None
token = None
has_set: Optional[bool]
if self._dirty:
has_set = False
state_bits = pickle.dumps(self._state)
set_response = self._backend.set_state(state_bits, self._token)
if set_response is not None:
state_bits, token, has_set = set_response
else:
has_set = None
if self._cache_duration > 0:
# Avoid overloading the backend if we are asked to retrieve the
# state repeatedly. Try to serve the cached state.
if self._last_sync_time >= max(time.monotonic() - self._cache_duration, 0):
return None
get_response = self._backend.get_state()
if get_response is not None:
state_bits, token = get_response
if state_bits is not None:
try:
self._state = pickle.loads(state_bits)
except pickle.PickleError as exc:
raise RendezvousStateError(
"The rendezvous state is corrupt. See inner exception for details."
) from exc
else:
self._state = _RendezvousState()
if has_set and self._dead_nodes and log.isEnabledFor(logging.DEBUG):
node_list = ", ".join(f"'{dead_node}'" for dead_node in self._dead_nodes)
msg = (
f"As part of the sync operation the node(s) {node_list} have been removed from the "
f"rendezvous '{self._settings.run_id}' since they had no heartbeat."
)
self._record(message=msg)
log.debug(msg)
self._token = token
self._dirty = False
self._last_sync_time = time.monotonic()
self._sanitize()
return has_set
def _sanitize(self) -> None:
state = self._state
expire_time = datetime.utcnow() - (
self._settings.keep_alive_interval * self._settings.keep_alive_max_attempt
)
# Filter out the dead nodes.
self._dead_nodes = [
node
for node, last_heartbeat in state.last_heartbeats.items()
if last_heartbeat < expire_time
]
participant_removed = False
for dead_node in self._dead_nodes:
del state.last_heartbeats[dead_node]
try:
del state.participants[dead_node]
participant_removed = True
except KeyError:
pass
try:
state.wait_list.remove(dead_node)
except KeyError:
pass
if participant_removed:
# Common epilogue shared with the _remove_from_participants()
# function of _DistributedRendezvousOpExecutor.
_remove_participant_epilogue(state, self._settings)
def mark_dirty(self) -> None:
"""See base class.
If the local rendezvous state is dirty, the next sync call will try to
write the changes back to the backend. However this attempt might fail
if another node, which had the same state, also made changes and wrote
them before us.
"""
self._dirty = True
class _Action(Enum):
"""Specifies the possible actions based on the state of the rendezvous."""
KEEP_ALIVE = 1
ADD_TO_PARTICIPANTS = 2
ADD_TO_WAIT_LIST = 3
REMOVE_FROM_PARTICIPANTS = 4
REMOVE_FROM_WAIT_LIST = 5
MARK_RENDEZVOUS_COMPLETE = 6
MARK_RENDEZVOUS_CLOSED = 7
SYNC = 8
ERROR_CLOSED = 9
ERROR_TIMEOUT = 10
FINISH = 11
class _RendezvousContext:
"""Holds the context of the rendezvous.
Attributes:
node:
The node descriptor associated with the current rendezvous handler
instance.
state:
The current state of the rendezvous.
settings:
The rendezvous settings.
"""
node: _NodeDesc
state: _RendezvousState
settings: RendezvousSettings
def __init__(
self, node: _NodeDesc, state: _RendezvousState, settings: RendezvousSettings
) -> None:
self.node = node
self.state = state
self.settings = settings
class _RendezvousOpExecutor(ABC):
"""Executes rendezvous operations."""
@abstractmethod
def run(
self,
state_handler: Callable[[_RendezvousContext, float], _Action],
deadline: float,
) -> None:
"""Executes a rendezvous operation.
An operation is run inside a state machine and is expected to transition
the rendezvous from one state to another.
Args:
state_handler:
A callable that is expected to return the next state transition
action based on the current state of the rendezvous.
deadline:
The time, in seconds, at which the operation will be considered
timed-out.
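        Example::

            # A hedged sketch; ``executor`` is assumed to be a concrete
            # _RendezvousOpExecutor (e.g. a _DistributedRendezvousOpExecutor).
            join_op = _RendezvousJoinOp()
            executor.run(join_op, deadline=time.monotonic() + 600)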
"""
class _DistributedRendezvousOpExecutor(_RendezvousOpExecutor):
"""Executes rendezvous operations using a shared state.
Args:
node:
The node descriptor associated with the current rendezvous handler
instance.
state_holder:
The ``RendezvousStateHolder`` to use to sync the rendezvous state
with other nodes.
settings:
The rendezvous settings.
"""
_node: _NodeDesc
_state: _RendezvousState
_state_holder: _RendezvousStateHolder
_settings: RendezvousSettings
def __init__(
self,
node: _NodeDesc,
state_holder: _RendezvousStateHolder,
settings: RendezvousSettings,
) -> None:
self._node = node
self._state_holder = state_holder
self._settings = settings
def _record(self, message: str, node_state: NodeState = NodeState.RUNNING) -> None:
construct_and_record_rdzv_event(
name=f"{self.__class__.__name__}.{get_method_name()}",
run_id=self._settings.run_id,
message=message,
node_state=node_state,
hostname=self._node.fqdn,
pid=self._node.pid,
local_id=self._node.local_id,
)
def run(
self,
state_handler: Callable[[_RendezvousContext, float], _Action],
deadline: float,
) -> None:
"""See base class."""
action = None
while action != _Action.FINISH:
# Reads or writes the latest rendezvous state shared by all nodes in
# the rendezvous. Note that our local changes might get overridden
# by another node if that node synced its changes before us.
has_set = self._state_holder.sync()
if has_set is not None:
if has_set:
msg = (
f"The node '{self._node}' has successfully synced its local changes with "
f"other nodes in the rendezvous '{self._settings.run_id}'."
)
else:
msg = (
f"The node '{self._node}' has a stale state and failed to sync its local "
f"changes with other nodes in the rendezvous '{self._settings.run_id}'."
)
self._record(message=msg)
log.debug(msg)
self._state = self._state_holder.state
ctx = _RendezvousContext(self._node, self._state, self._settings)
# Determine the next action to take based on the current state of
# the rendezvous.
action = state_handler(ctx, deadline)
if action == _Action.FINISH:
continue
if action == _Action.ERROR_CLOSED:
raise RendezvousClosedError()
if action == _Action.ERROR_TIMEOUT:
raise RendezvousTimeoutError()
if action == _Action.SYNC:
# Delay the execution by one second to avoid overloading the
# backend if we are asked to poll for state changes.
_delay(seconds=1)
else:
if action == _Action.KEEP_ALIVE:
self._keep_alive()
elif action == _Action.ADD_TO_PARTICIPANTS:
self._add_to_participants()
elif action == _Action.ADD_TO_WAIT_LIST:
self._add_to_wait_list()
elif action == _Action.REMOVE_FROM_PARTICIPANTS:
self._remove_from_participants()
elif action == _Action.REMOVE_FROM_WAIT_LIST:
self._remove_from_wait_list()
elif action == _Action.MARK_RENDEZVOUS_COMPLETE:
self._mark_rendezvous_complete()
elif action == _Action.MARK_RENDEZVOUS_CLOSED:
self._mark_rendezvous_closed()
# Attempt to sync our changes back to other nodes.
self._state_holder.mark_dirty()
def _keep_alive(self) -> None:
msg = (
f"The node '{self._node}' updated its keep-alive heartbeat time for the rendezvous "
f"'{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
log.debug(msg)
self._state.last_heartbeats[self._node] = datetime.utcnow()
def _add_to_participants(self) -> None:
msg = (
f"The node '{self._node}' added itself to the participants of round "
f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
log.debug(msg)
state = self._state
try:
state.wait_list.remove(self._node)
except KeyError:
pass
# The ranks of the participants will be set once the rendezvous is
# complete.
state.participants[self._node] = 0
self._keep_alive()
if len(state.participants) == self._settings.min_nodes:
state.deadline = datetime.utcnow() + self._settings.timeout.last_call
if len(state.participants) == self._settings.max_nodes:
self._mark_rendezvous_complete()
def _add_to_wait_list(self) -> None:
msg = (
f"The node '{self._node}' added itself to the wait list of round "
f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
log.debug(msg)
self._state.wait_list.add(self._node)
self._keep_alive()
def _remove_from_participants(self) -> None:
msg = (
f"The node '{self._node}' removed itself from the participants of round "
f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
log.debug(msg)
state = self._state
del state.participants[self._node]
del state.last_heartbeats[self._node]
        # Common epilogue shared with the _sanitize() function of
# _BackendRendezvousStateHolder.
_remove_participant_epilogue(state, self._settings)
def _remove_from_wait_list(self) -> None:
msg = (
f"The node '{self._node}' removed itself from the wait list of round "
f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
log.debug(msg)
self._state.wait_list.remove(self._node)
del self._state.last_heartbeats[self._node]
def _mark_rendezvous_complete(self) -> None:
msg = (
f"The node '{self._node}' marked round {self._state.round} of the rendezvous "
f"'{self._settings.run_id}' as complete. Pending sync."
)
self._record(message=msg, node_state=NodeState.SUCCEEDED)
log.debug(msg)
state = self._state
state.complete = True
state.deadline = None
# Assign the ranks.
for rank, node in enumerate(sorted(state.participants)):
state.participants[node] = rank
def _mark_rendezvous_closed(self) -> None:
msg = (
f"The node '{self._node}' marked the rendezvous '{self._settings.run_id}' as closed. "
"Pending sync."
)
self._record(message=msg, node_state=NodeState.SUCCEEDED)
log.debug(msg)
self._state.closed = True
def _should_keep_alive(ctx: _RendezvousContext) -> bool:
"""Determines whether a keep-alive heartbeat should be sent."""
try:
last_heartbeat = ctx.state.last_heartbeats[ctx.node]
except KeyError:
return False
return last_heartbeat <= datetime.utcnow() - ctx.settings.keep_alive_interval
class _RendezvousExitOp:
"""Represents a rendezvous exit operation."""
def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
if ctx.node in ctx.state.participants:
if time.monotonic() > deadline:
return _Action.ERROR_TIMEOUT
return _Action.REMOVE_FROM_PARTICIPANTS
return _Action.FINISH
class _RendezvousJoinOp:
"""Represents a rendezvous join operation."""
def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
state = ctx.state
# A closed rendezvous means that it no longer accepts new nodes.
if state.closed:
return _Action.ERROR_CLOSED
is_participant = ctx.node in state.participants
# If we are part of the rendezvous and it is already complete there is
# no further action to take.
if state.complete and is_participant:
return _Action.FINISH
now = time.monotonic()
if now > deadline:
rollback_period = 5 # 5 seconds
            # If we still have time to roll back (a short period on top of the
            # operation deadline), try to remove ourselves from the rendezvous.
            # It is okay if we can't, though, as our keep-alive will eventually
# expire.
if now <= deadline + rollback_period:
# If we are part of the rendezvous, it means we couldn't find
# enough participants to complete it on time.
if is_participant:
return _Action.REMOVE_FROM_PARTICIPANTS
# If we are in the wait list, it means we couldn't wait till the
# next round of the rendezvous.
if ctx.node in state.wait_list:
return _Action.REMOVE_FROM_WAIT_LIST
return _Action.ERROR_TIMEOUT
if state.complete:
# If we are here, it means we are not part of the rendezvous. In
            # case the rendezvous has capacity for additional participants, add
            # ourselves to the wait list for the next round.
if len(state.participants) < ctx.settings.max_nodes:
if ctx.node not in state.wait_list:
return _Action.ADD_TO_WAIT_LIST
elif is_participant:
            # If the rendezvous has enough participants, including us,
# check whether we have passed the rendezvous deadline. If yes,
# complete it.
if len(state.participants) >= ctx.settings.min_nodes:
if cast(datetime, state.deadline) < datetime.utcnow():
return _Action.MARK_RENDEZVOUS_COMPLETE
else:
# The rendezvous is not complete yet and we are not part of it. Try
# to join.
return _Action.ADD_TO_PARTICIPANTS
if _should_keep_alive(ctx):
return _Action.KEEP_ALIVE
# At this point either the rendezvous is not complete, but we are part
# of it, which means we have to wait for other participants to join; or
# the rendezvous is complete, but we are not part of it, which means we
# have to wait for the next round.
return _Action.SYNC
class _RendezvousCloseOp:
"""Represents a rendezvous close operation."""
def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
if ctx.state.closed:
return _Action.FINISH
if time.monotonic() > deadline:
return _Action.ERROR_TIMEOUT
return _Action.MARK_RENDEZVOUS_CLOSED
class _RendezvousKeepAliveOp:
"""Represents a rendezvous keep-alive update operation."""
def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
if _should_keep_alive(ctx):
if time.monotonic() > deadline:
return _Action.ERROR_TIMEOUT
return _Action.KEEP_ALIVE
return _Action.FINISH
class DynamicRendezvousHandler(RendezvousHandler):
"""Represents a handler that sets up a rendezvous among a set of nodes."""
# Static
_node_desc_generator = _NodeDescGenerator()
_this_node: _NodeDesc
_settings: RendezvousSettings
_backend_name: str
_store: Store
_state_holder: _RendezvousStateHolder
_op_executor: _RendezvousOpExecutor
_heartbeat_lock: threading.Lock
_keep_alive_timer: Optional[_PeriodicTimer]
@classmethod
def from_backend(
cls,
run_id: str,
store: Store,
backend: RendezvousBackend,
min_nodes: int,
max_nodes: int,
timeout: Optional[RendezvousTimeout] = None,
):
"""Creates a new :py:class:`DynamicRendezvousHandler`.
Args:
run_id:
The run id of the rendezvous.
store:
The C10d store to return as part of the rendezvous.
backend:
The backend to use to hold the rendezvous state.
min_nodes:
The minimum number of nodes to admit to the rendezvous.
max_nodes:
The maximum number of nodes to admit to the rendezvous.
timeout:
The timeout configuration of the rendezvous.
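        Example::

            # A hedged sketch; ``store`` is assumed to be a c10d Store and
            # ``backend`` any RendezvousBackend implementation.
            handler = DynamicRendezvousHandler.from_backend(
                run_id="my_run",
                store=store,
                backend=backend,
                min_nodes=2,
                max_nodes=4,
            )
            store, rank, world_size = handler.next_rendezvous()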
"""
# We associate each handler instance with a unique node descriptor.
node = cls._node_desc_generator.generate()
settings = RendezvousSettings(
run_id,
min_nodes,
max_nodes,
timeout or RendezvousTimeout(),
keep_alive_interval=timedelta(seconds=5),
keep_alive_max_attempt=3,
)
state_holder = _BackendRendezvousStateHolder(backend, settings)
return cls(node, settings, backend.name, store, state_holder)
def __init__(
self,
node: _NodeDesc,
settings: RendezvousSettings,
backend_name: str,
store: Store,
state_holder: _RendezvousStateHolder,
) -> None:
if not settings.run_id:
raise ValueError("The run id must be a non-empty string.")
if settings.min_nodes < 1:
raise ValueError(
f"The minimum number of nodes ({settings.min_nodes}) must be greater than zero."
)
if settings.max_nodes < settings.min_nodes:
raise ValueError(
f"The maximum number of nodes ({settings.max_nodes}) must be greater than or equal "
f"to the minimum number of nodes ({settings.min_nodes})."
)
self._this_node = node
self._settings = settings
self._backend_name = backend_name
self._store = store
self._state_holder = state_holder
self._op_executor = _DistributedRendezvousOpExecutor(
self._this_node, self._state_holder, self._settings
)
self._heartbeat_lock = threading.Lock()
self._keep_alive_timer = None
def _record(
self,
message: str,
node_state: NodeState = NodeState.RUNNING,
rank: Optional[int] = None,
) -> None:
construct_and_record_rdzv_event(
name=f"{self.__class__.__name__}.{get_method_name()}",
run_id=self._settings.run_id,
message=message,
node_state=node_state,
hostname=self._this_node.fqdn,
pid=self._this_node.pid,
local_id=self._this_node.local_id,
rank=rank,
)
@property
def settings(self) -> RendezvousSettings:
"""Gets the settings of the rendezvous."""
return self._settings
def get_backend(self) -> str:
"""See base class."""
return self._backend_name
def next_rendezvous(self) -> Tuple[Store, int, int]:
"""See base class."""
msg = (
f"The node '{self._this_node}' attempts to join the next round of the rendezvous "
f"'{self._settings.run_id}'."
)
self._record(message=msg)
log.info(msg)
try:
self._stop_heartbeats()
# Delay the execution for a small random amount of time if this is our
# first run. This will slightly skew the rendezvous attempts across the
# nodes and reduce the load on the backend.
if self._state_holder.state.round == 0:
_delay(seconds=(0, 0.3))
exit_op = _RendezvousExitOp()
join_op = _RendezvousJoinOp()
deadline = self._get_deadline(self._settings.timeout.join)
self._op_executor.run(exit_op, deadline)
self._op_executor.run(join_op, deadline)
self._start_heartbeats()
rank, world_size = self._get_world()
store = self._get_store()
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
msg = (
f"The node '{self._this_node}' has joined round {self._state_holder.state.round} of "
f"the rendezvous '{self._settings.run_id}' as rank {rank} in a world of size "
f"{world_size}."
)
self._record(message=msg, rank=rank)
log.info(msg)
return store, rank, world_size
def is_closed(self) -> bool:
"""See base class."""
try:
with self._heartbeat_lock:
self._state_holder.sync()
return self._state_holder.state.closed
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def set_closed(self) -> None:
"""See base class."""
try:
with self._heartbeat_lock:
self._close()
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def num_nodes_waiting(self) -> int:
"""See base class."""
try:
with self._heartbeat_lock:
self._state_holder.sync()
return len(self._state_holder.state.wait_list)
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def get_run_id(self) -> str:
"""See base class."""
return self._settings.run_id
def shutdown(self) -> bool:
"""See base class."""
self._stop_heartbeats()
try:
self._close()
return True
except RendezvousError as ex:
msg = (
f"The node '{self._this_node}' has failed to shutdown the rendezvous "
f"'{self._settings.run_id}' due to an error of type {type(ex).__name__}."
)
self._record(message=msg, node_state=NodeState.FAILED)
log.warning(msg)
return False
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def _close(self) -> None:
op = _RendezvousCloseOp()
deadline = self._get_deadline(self._settings.timeout.close)
self._op_executor.run(op, deadline)
msg = f"The node '{self._this_node}' has closed the rendezvous '{self._settings.run_id}'."
self._record(message=msg, node_state=NodeState.SUCCEEDED)
log.info(msg)
@staticmethod
def _keep_alive_weak(weak_self) -> None:
self = weak_self()
if self is not None:
self._keep_alive()
def _keep_alive(self) -> None:
self._heartbeat_lock.acquire()
op = _RendezvousKeepAliveOp()
deadline = self._get_deadline(self._settings.timeout.heartbeat)
try:
self._op_executor.run(op, deadline)
msg = (
f"The node '{self._this_node}' has sent a keep-alive heartbeat to the rendezvous "
f"'{self._settings.run_id}'."
)
self._record(message=msg)
log.debug(msg)
except RendezvousError as ex:
msg = (
f"The node '{self._this_node}' has failed to send a keep-alive heartbeat to the "
f"rendezvous '{self._settings.run_id}' due to an error of type {type(ex).__name__}."
)
self._record(message=msg, node_state=NodeState.FAILED)
log.warning(msg)
finally:
self._heartbeat_lock.release()
def _start_heartbeats(self) -> None:
self._keep_alive_timer = _PeriodicTimer(
self._settings.keep_alive_interval, self._keep_alive_weak, weakref.ref(self)
)
self._keep_alive_timer.set_name(f"RendezvousKeepAliveTimer_{self._this_node.local_id}")
self._keep_alive_timer.start()
def _stop_heartbeats(self) -> None:
if self._keep_alive_timer is None:
return
self._keep_alive_timer.cancel()
def _get_world(self) -> Tuple[int, int]:
state = self._state_holder.state
return state.participants[self._this_node], len(state.participants)
def _get_store(self) -> Store:
key_prefix = f"torch.rendezvous.{self._settings.run_id}.{self._state_holder.state.round}"
return PrefixStore(key_prefix, self._store)
def _get_deadline(self, timeout: timedelta) -> float:
return time.monotonic() + timeout.total_seconds()
def _get_timeout(params: RendezvousParameters, key: str) -> Optional[timedelta]:
timeout = params.get_as_int(key + "_timeout")
if timeout is None:
return None
return timedelta(seconds=timeout)
def create_handler(
store: Store, backend: RendezvousBackend, params: RendezvousParameters
) -> DynamicRendezvousHandler:
"""Creates a new :py:class:`DynamicRendezvousHandler` from the specified
parameters.
Args:
store:
The C10d store to return as part of the rendezvous.
backend:
The backend to use to hold the rendezvous state.
+-------------------+------------------------------------------------------+
| Parameter | Description |
+===================+======================================================+
| join_timeout | The total time, in seconds, within which the |
| | rendezvous is expected to complete. Defaults to 600 |
| | seconds. |
+-------------------+------------------------------------------------------+
| last_call_timeout | An additional wait amount, in seconds, before |
| | completing the rendezvous once the minimum number of |
| | nodes has been reached. Defaults to 30 seconds. |
+-------------------+------------------------------------------------------+
| close_timeout | The time, in seconds, within which the rendezvous is |
| | expected to close after a call to |
| | :py:meth:`RendezvousHandler.set_closed` or |
| | :py:meth:`RendezvousHandler.shutdown`. Defaults to |
| | 30 seconds. |
+-------------------+------------------------------------------------------+
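    Usage::

        # A hedged sketch; ``store`` and ``backend`` are assumed to already
        # exist and the parameter values are illustrative only.
        params = RendezvousParameters(
            backend="my_backend",
            endpoint="localhost:29400",
            run_id="my_run",
            min_nodes=1,
            max_nodes=2,
            join_timeout="300",
        )
        handler = create_handler(store, backend, params)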
"""
try:
timeout = RendezvousTimeout(
_get_timeout(params, "join"),
_get_timeout(params, "last_call"),
_get_timeout(params, "close"),
)
return DynamicRendezvousHandler.from_backend(
params.run_id,
store,
backend,
params.min_nodes,
params.max_nodes,
timeout,
)
except Exception as e:
construct_and_record_rdzv_event(
message=f"{type(e).__name__}: {str(e)}",
run_id=params.run_id,
node_state=NodeState.FAILED,
)
raise
| pytorch-master | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import logging
import os
import shlex
import shutil
import socket
import subprocess
import tempfile
import time
from typing import Optional, TextIO, Union
try:
import etcd # type: ignore[import]
except ModuleNotFoundError:
pass
log = logging.getLogger(__name__)
def find_free_port():
"""
Finds a free port and binds a temporary socket to it so that
the port can be "reserved" until used.
.. note:: the returned socket must be closed before using the port,
              otherwise an ``address already in use`` error will occur.
The socket should be held and closed as close to the
              consumer of the port as possible, since otherwise there
              is a greater chance of a race condition where a different
              process may see the port as being free and take it.
    Returns: a socket bound to the reserved free port
Usage::
sock = find_free_port()
port = sock.getsockname()[1]
sock.close()
use_port(port)
"""
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
try:
s = socket.socket(family, type, proto)
s.bind(("localhost", 0))
s.listen(0)
return s
except OSError as e:
s.close()
print(f"Socket creation attempt failed: {e}")
raise RuntimeError("Failed to create a socket")
def stop_etcd(subprocess, data_dir: Optional[str] = None):
if subprocess and subprocess.poll() is None:
log.info("stopping etcd server")
subprocess.terminate()
subprocess.wait()
if data_dir:
log.info(f"deleting etcd data dir: {data_dir}")
shutil.rmtree(data_dir, ignore_errors=True)
class EtcdServer:
"""
.. note:: tested on etcd server v3.4.3
Starts and stops a local standalone etcd server on a random free
port. Useful for single node, multi-worker launches or testing,
    where a sidecar etcd server is more convenient than having to
    separately set up an etcd server.
    This class registers a termination handler to shut down the etcd
subprocess on exit. This termination handler is NOT a substitute for
calling the ``stop()`` method.
The following fallback mechanism is used to find the etcd binary:
1. Uses env var TORCHELASTIC_ETCD_BINARY_PATH
2. Uses ``<this file root>/bin/etcd`` if one exists
3. Uses ``etcd`` from ``PATH``
Usage
::
     server = EtcdServer(data_dir="/tmp/default.etcd")
server.start()
client = server.get_client()
# use client
server.stop()
Args:
        data_dir: path to the directory where etcd stores its data; a temporary
            directory is created when ``None``. The etcd binary itself is located
            via the fallback mechanism described above.
"""
def __init__(self, data_dir: Optional[str] = None):
self._port = -1
self._host = "localhost"
root = os.path.dirname(__file__)
default_etcd_bin = os.path.join(root, "bin/etcd")
self._etcd_binary_path = os.environ.get(
"TORCHELASTIC_ETCD_BINARY_PATH", default_etcd_bin
)
if not os.path.isfile(self._etcd_binary_path):
self._etcd_binary_path = "etcd"
self._base_data_dir = (
data_dir if data_dir else tempfile.mkdtemp(prefix="torchelastic_etcd_data")
)
self._etcd_cmd = None
self._etcd_proc: Optional[subprocess.Popen] = None
def _get_etcd_server_process(self) -> subprocess.Popen:
if not self._etcd_proc:
raise RuntimeError(
"No etcd server process started. Call etcd_server.start() first"
)
else:
return self._etcd_proc
def get_port(self) -> int:
"""
Returns:
the port the server is running on.
"""
return self._port
def get_host(self) -> str:
"""
Returns:
the host the server is running on.
"""
return self._host
def get_endpoint(self) -> str:
"""
Returns:
the etcd server endpoint (host:port)
"""
return f"{self._host}:{self._port}"
def start(
self,
timeout: int = 60,
num_retries: int = 3,
stderr: Union[int, TextIO, None] = None,
) -> None:
"""
Starts the server, and waits for it to be ready. When this function
        returns, the server is ready to take requests.
Args:
timeout: time (in seconds) to wait for the server to be ready
before giving up.
num_retries: number of retries to start the server. Each retry
will wait for max ``timeout`` before considering it as failed.
stderr: the standard error file handle. Valid values are
`subprocess.PIPE`, `subprocess.DEVNULL`, an existing file
descriptor (a positive integer), an existing file object, and
`None`.
Raises:
TimeoutError: if the server is not ready within the specified timeout
"""
curr_retries = 0
while True:
try:
data_dir = os.path.join(self._base_data_dir, str(curr_retries))
os.makedirs(data_dir, exist_ok=True)
return self._start(data_dir, timeout, stderr)
except Exception as e:
curr_retries += 1
stop_etcd(self._etcd_proc)
log.warning(
f"Failed to start etcd server, got error: {str(e)}, retrying"
)
if curr_retries >= num_retries:
shutil.rmtree(self._base_data_dir, ignore_errors=True)
raise
atexit.register(stop_etcd, self._etcd_proc, self._base_data_dir)
def _start(
self, data_dir: str, timeout: int = 60, stderr: Union[int, TextIO, None] = None
) -> None:
sock = find_free_port()
sock_peer = find_free_port()
self._port = sock.getsockname()[1]
peer_port = sock_peer.getsockname()[1]
etcd_cmd = shlex.split(
" ".join(
[
self._etcd_binary_path,
"--enable-v2",
"--data-dir",
data_dir,
"--listen-client-urls",
f"http://{self._host}:{self._port}",
"--advertise-client-urls",
f"http://{self._host}:{self._port}",
"--listen-peer-urls",
f"http://{self._host}:{peer_port}",
]
)
)
log.info(f"Starting etcd server: [{etcd_cmd}]")
sock.close()
sock_peer.close()
self._etcd_proc = subprocess.Popen(etcd_cmd, close_fds=True, stderr=stderr)
self._wait_for_ready(timeout)
def get_client(self):
"""
Returns:
An etcd client object that can be used to make requests to
this server.
"""
return etcd.Client(
host=self._host, port=self._port, version_prefix="/v2", read_timeout=10
)
def _wait_for_ready(self, timeout: int = 60) -> None:
client = etcd.Client(
host=f"{self._host}", port=self._port, version_prefix="/v2", read_timeout=5
)
max_time = time.time() + timeout
while time.time() < max_time:
if self._get_etcd_server_process().poll() is not None:
# etcd server process finished
exitcode = self._get_etcd_server_process().returncode
raise RuntimeError(
f"Etcd server process exited with the code: {exitcode}"
)
try:
log.info(f"etcd server ready. version: {client.version}")
return
except Exception:
time.sleep(1)
raise TimeoutError("Timed out waiting for etcd server to be ready!")
def stop(self) -> None:
"""
Stops the server and cleans up auto generated resources (e.g. data dir)
"""
log.info("EtcdServer stop method called")
stop_etcd(self._etcd_proc, self._base_data_dir)
| pytorch-master | torch/distributed/elastic/rendezvous/etcd_server.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import logging
from typing import Tuple, cast, Optional
# pyre-ignore[21]: Could not find name `Store` in `torch.distributed`.
from torch.distributed import Store, TCPStore, PrefixStore
from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters
from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint
log = logging.getLogger(__name__)
_default_timeout_seconds = 600
class StaticTCPRendezvous(RendezvousHandler):
"""
Static rendezvous that is a wrapper around the TCPStore.
    Creates a TCPStore based on the input parameters, with the
    listener hosted on the agent that has group_rank=0.
"""
def __init__(
self,
master_addr: str,
master_port: int,
rank: int,
world_size: int,
run_id: str,
timeout: int,
):
self.master_addr = master_addr
self.master_port = master_port
self.rank = rank
self.world_size = world_size
self.run_id = run_id
self.timeout = datetime.timedelta(seconds=timeout)
self._store: Optional[Store] = None
def get_backend(self) -> str:
return "static"
def next_rendezvous(self) -> Tuple[Store, int, int]:
log.info("Creating TCPStore as the c10d::Store implementation")
if not self._store:
is_master = self.rank == 0
self._store = TCPStore( # type: ignore[call-arg]
self.master_addr,
self.master_port,
self.world_size,
is_master,
self.timeout,
multi_tenant=True,
)
store = PrefixStore(self.run_id, self._store)
return store, self.rank, self.world_size
def is_closed(self):
return False
def set_closed(self):
pass
def num_nodes_waiting(self):
return 0
def get_run_id(self) -> str:
return self.run_id
def shutdown(self) -> bool:
return True
def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler:
if "rank" not in params.config:
raise ValueError(
"rank is absent in RendezvousParameters."
"Try add --node_rank to the cmd request"
)
endpoint = params.endpoint.strip()
if not endpoint:
raise ValueError(
"endpoint is absent in RendezvousParameters"
"Try add --master_port and --master_addr to the cmd request"
)
master_addr, master_port = parse_rendezvous_endpoint(endpoint, -1)
if master_port == -1:
raise ValueError(
f"Port is absent in endpoint: {endpoint}. Try launching with --master_port"
)
world_size = params.max_nodes
rank = cast(int, params.config.get("rank"))
run_id = params.run_id
if "timeout" in params.config:
timeout = int(params.config["timeout"])
else:
timeout = _default_timeout_seconds
return StaticTCPRendezvous(
master_addr, master_port, rank, world_size, run_id, timeout
)
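# A hedged usage sketch (values are illustrative only). The static handler is
# normally constructed through create_rdzv_handler() from RendezvousParameters
# that carry the endpoint, the node rank, and the world size:
#
#   params = RendezvousParameters(
#       backend="static",
#       endpoint="10.0.0.1:29500",
#       run_id="my_run",
#       min_nodes=2,
#       max_nodes=2,
#       rank=0,
#   )
#   handler = create_rdzv_handler(params)
#   store, rank, world_size = handler.next_rendezvous()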
| pytorch-master | torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import time
from concurrent.futures._base import Future
from concurrent.futures.thread import ThreadPoolExecutor
from threading import Event
from typing import Dict, List, TextIO
log = logging.getLogger(__name__)
def tail_logfile(
header: str, file: str, dst: TextIO, finished: Event, interval_sec: float
):
while not os.path.exists(file):
if finished.is_set():
return
time.sleep(interval_sec)
with open(file, "r") as fp:
while True:
line = fp.readline()
if line:
dst.write(f"{header}{line}")
else: # reached EOF
if finished.is_set():
# log line producer is finished
break
else:
# log line producer is still going
# wait for a bit before looping again
time.sleep(interval_sec)
class TailLog:
"""
Tails the given log files. The log files do not have to exist when the
``start()`` method is called. The tail-er will gracefully wait until the
log files are created by the producer and will tail the contents of the
log files until the ``stop()`` method is called.
.. warning:: ``TailLog`` will wait indefinitely for the log file to be created!
    Each log file's line will be prefixed with a header of the form: ``[{name}{idx}]:``,
where the ``name`` is user-provided and ``idx`` is the index of the log file
in the ``log_files`` mapping.
Usage:
::
log_files = {0: "/tmp/0_stdout.log", 1: "/tmp/1_stdout.log"}
tailer = TailLog("trainer", log_files, sys.stdout).start()
# actually run the trainers to produce 0_stdout.log and 1_stdout.log
run_trainers()
tailer.stop()
     # once run_trainers() starts writing the ##_stdout.log files
# the tailer will print to sys.stdout:
# >>> [trainer0]:log_line1
# >>> [trainer1]:log_line1
# >>> [trainer0]:log_line2
# >>> [trainer0]:log_line3
# >>> [trainer1]:log_line2
    .. note:: Due to buffering, log lines between files may not necessarily
be printed out in order. You should configure your application's
logger to suffix each log line with a proper timestamp.
"""
def __init__(
self,
name: str,
log_files: Dict[int, str],
dst: TextIO,
interval_sec: float = 0.1,
):
n = len(log_files)
self._threadpool = None
if n > 0:
self._threadpool = ThreadPoolExecutor(
max_workers=n,
thread_name_prefix=f"{self.__class__.__qualname__}_{name}",
)
self._name = name
self._dst = dst
self._log_files = log_files
self._finished_events: Dict[int, Event] = {
local_rank: Event() for local_rank in log_files.keys()
}
self._futs: List[Future] = []
self._interval_sec = interval_sec
self._stopped = False
def start(self) -> "TailLog":
if not self._threadpool:
return self
for local_rank, file in self._log_files.items():
self._futs.append(
self._threadpool.submit(
tail_logfile,
header=f"[{self._name}{local_rank}]:",
file=file,
dst=self._dst,
finished=self._finished_events[local_rank],
interval_sec=self._interval_sec,
)
)
return self
def stop(self) -> None:
for finished in self._finished_events.values():
finished.set()
for local_rank, f in enumerate(self._futs):
try:
f.result()
except Exception as e:
log.error(
f"error in log tailor for {self._name}{local_rank}."
f" {e.__class__.__qualname__}: {e}",
)
if self._threadpool:
self._threadpool.shutdown(wait=True)
self._stopped = True
def stopped(self) -> bool:
return self._stopped
| pytorch-master | torch/distributed/elastic/multiprocessing/tail_log.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Library that launches and manages ``n`` copies of worker subprocesses
either specified by a function or a binary.
For functions, it uses ``torch.multiprocessing`` (and therefore python
``multiprocessing``) to spawn/fork worker processes. For binaries it uses python
``subprocess.Popen`` to create worker processes.
Usage 1: Launching two trainers as a function
::
from torch.distributed.elastic.multiprocessing import Std, start_processes
def trainer(a, b, c):
pass # train
# runs two trainers
# LOCAL_RANK=0 trainer(1,2,3)
# LOCAL_RANK=1 trainer(4,5,6)
ctx = start_processes(
name="trainer",
entrypoint=trainer,
args={0: (1,2,3), 1: (4,5,6)},
envs={0: {"LOCAL_RANK": 0}, 1: {"LOCAL_RANK": 1}},
log_dir="/tmp/foobar",
redirects=Std.ALL, # write all worker stdout/stderr to a log file
tee={0: Std.ERR}, # tee only local rank 0's stderr to console
)
# waits for all copies of trainer to finish
ctx.wait()
Usage 2: Launching 2 echo workers as a binary
::
# same as invoking
# echo hello
# echo world > stdout.log
ctx = start_processes(
name="echo"
entrypoint="echo",
log_dir="/tmp/foobar",
args={0: "hello", 1: "world"},
redirects={1: Std.OUT},
)
Just like ``torch.multiprocessing``, the return value of the function
:func:`start_processes` is a process context (:class:`api.PContext`). If a function
was launched, a :class:`api.MultiprocessContext` is returned and if a binary
was launched a :class:`api.SubprocessContext` is returned. Both are specific
implementations of the parent :class:`api.PContext` class.
"""
import os
from typing import Callable, Dict, Tuple, Union
from torch.distributed.elastic.multiprocessing.api import ( # noqa: F401
MultiprocessContext,
PContext,
ProcessFailure,
RunProcsResult,
Std,
SignalException,
SubprocessContext,
_validate_full_rank,
to_map,
)
from torch.distributed.elastic.utils.logging import get_logger
log = get_logger()
def start_processes(
name: str,
entrypoint: Union[Callable, str],
args: Dict[int, Tuple],
envs: Dict[int, Dict[str, str]],
log_dir: str,
start_method: str = "spawn",
redirects: Union[Std, Dict[int, Std]] = Std.NONE,
tee: Union[Std, Dict[int, Std]] = Std.NONE,
) -> PContext:
"""
Starts ``n`` copies of ``entrypoint`` processes with the provided options.
``entrypoint`` is either a ``Callable`` (function) or a ``str`` (binary).
The number of copies is determined by the number of entries for ``args`` and
``envs`` arguments, which need to have the same key set.
    ``args`` and ``envs`` parameters are the arguments and environment variables
to pass down to the entrypoint mapped by the replica index (local rank).
All local ranks must be accounted for.
That is, the keyset should be ``{0,1,...,(nprocs-1)}``.
.. note:: When the ``entrypoint`` is a binary (``str``), ``args`` can only be strings.
              If any other type is given, then it is cast to a string representation
(e.g. ``str(arg1)``). Furthermore, a binary failure will only write
an ``error.json`` error file if the main function is annotated with
``torch.distributed.elastic.multiprocessing.errors.record``. For function launches,
this is done by default and there is no need to manually annotate
with the ``@record`` annotation.
    ``redirects`` and ``tee`` are bitmasks specifying which std stream(s) to redirect
to a log file in the ``log_dir``. Valid mask values are defined in ``Std``.
To redirect/tee only certain local ranks, pass ``redirects`` as a map with the key as
the local rank to specify the redirect behavior for.
Any missing local ranks will default to ``Std.NONE``.
``tee`` acts like the unix "tee" command in that it redirects + prints to console.
To avoid worker stdout/stderr from printing to console, use the ``redirects`` parameter.
For each process, the ``log_dir`` will contain:
#. ``{local_rank}/error.json``: if the process failed, a file with the error info
    #. ``{local_rank}/stdout.log``: if ``redirect & STDOUT == STDOUT``
    #. ``{local_rank}/stderr.log``: if ``redirect & STDERR == STDERR``
.. note:: It is expected that the ``log_dir`` exists, is empty, and is a directory.
Example:
::
log_dir = "/tmp/test"
# ok; two copies of foo: foo("bar0"), foo("bar1")
start_processes(
name="trainer",
entrypoint=foo,
        args={0: ("bar0",), 1: ("bar1",)},
        envs={0: {}, 1: {}},
log_dir=log_dir
)
# invalid; envs missing for local rank 1
start_processes(
name="trainer",
entrypoint=foo,
        args={0: ("bar0",), 1: ("bar1",)},
        envs={0: {}},
log_dir=log_dir
)
# ok; two copies of /usr/bin/touch: touch file1, touch file2
start_processes(
name="trainer",
entrypoint="/usr/bin/touch",
        args={0: ("file1",), 1: ("file2",)},
        envs={0: {}, 1: {}},
log_dir=log_dir
)
     # caution; arguments are cast to strings, runs:
# echo "1" "2" "3" and echo "[1, 2, 3]"
start_processes(
name="trainer",
entrypoint="/usr/bin/echo",
        args={0: (1, 2, 3), 1: ([1, 2, 3],)},
        envs={0: {}, 1: {}},
log_dir=log_dir
)
Args:
name: a human readable short name that describes what the processes are
(used as header when tee'ing stdout/stderr outputs)
entrypoint: either a ``Callable`` (function) or ``cmd`` (binary)
args: arguments to each replica
envs: env vars to each replica
log_dir: directory used to write log files
        start_method: multiprocessing start method (spawn, fork, forkserver);
                      ignored for binaries
redirects: which std streams to redirect to a log file
        tee: which std streams to redirect + print to console
"""
    # listdir raises FileNotFoundError or NotADirectoryError so no need to check manually
if os.listdir(log_dir):
raise RuntimeError(
f"log_dir: {log_dir} is not empty, please provide an empty log_dir"
)
nprocs = len(args)
_validate_full_rank(args, nprocs, "args")
_validate_full_rank(envs, nprocs, "envs")
# create subdirs for each local rank in the logs_dir
# logs_dir
# |- 0
# |- error.json
# |- stdout.log
# |- stderr.log
# |- ...
# |- (nprocs-1)
redirs = to_map(redirects, nprocs)
ts = to_map(tee, nprocs)
# to tee stdout/stderr we first redirect into a file
# then tail -f stdout.log/stderr.log so add tee settings to redirects
for local_rank, tee_std in ts.items():
redirect_std = redirs[local_rank]
redirs[local_rank] = redirect_std | tee_std
stdouts = {local_rank: "" for local_rank in range(nprocs)}
stderrs = {local_rank: "" for local_rank in range(nprocs)}
tee_stdouts: Dict[int, str] = {}
tee_stderrs: Dict[int, str] = {}
error_files = {}
for local_rank in range(nprocs):
clogdir = os.path.join(log_dir, str(local_rank))
os.mkdir(clogdir)
rd = redirs[local_rank]
if (rd & Std.OUT) == Std.OUT:
stdouts[local_rank] = os.path.join(clogdir, "stdout.log")
if (rd & Std.ERR) == Std.ERR:
stderrs[local_rank] = os.path.join(clogdir, "stderr.log")
t = ts[local_rank]
if t & Std.OUT == Std.OUT:
tee_stdouts[local_rank] = stdouts[local_rank]
if t & Std.ERR == Std.ERR:
tee_stderrs[local_rank] = stderrs[local_rank]
error_file = os.path.join(clogdir, "error.json")
error_files[local_rank] = error_file
log.info(f"Setting worker{local_rank} reply file to: {error_file}")
envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = error_file
context: PContext
if isinstance(entrypoint, str):
context = SubprocessContext(
name=name,
entrypoint=entrypoint,
args=args,
envs=envs,
stdouts=stdouts,
stderrs=stderrs,
tee_stdouts=tee_stdouts,
tee_stderrs=tee_stderrs,
error_files=error_files,
)
else:
context = MultiprocessContext(
name=name,
entrypoint=entrypoint,
args=args,
envs=envs,
stdouts=stdouts,
stderrs=stderrs,
tee_stdouts=tee_stdouts,
tee_stderrs=tee_stderrs,
error_files=error_files,
start_method=start_method,
)
try:
context.start()
return context
except Exception:
context.close()
raise
| pytorch-master | torch/distributed/elastic/multiprocessing/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import logging
import os
import re
import signal
import subprocess
import sys
import time
from contextlib import nullcontext
from dataclasses import dataclass, field
from enum import IntFlag
from multiprocessing import synchronize
from types import FrameType
from typing import Any, Callable, Dict, Optional, Set, Tuple, Union
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure, record
from torch.distributed.elastic.multiprocessing.redirects import (
redirect_stderr,
redirect_stdout,
)
from torch.distributed.elastic.multiprocessing.tail_log import TailLog
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
log = logging.getLogger(__name__)
class SignalException(Exception):
"""
Exception is raised inside the torchelastic agent process by the termination handler
if the death signal got received by the process.
"""
def __init__(self, msg: str, sigval: signal.Signals) -> None:
super().__init__(msg)
self.sigval = sigval
def _terminate_process_handler(signum: int, frame: Optional[FrameType]) -> None:
"""Termination handler that raises exceptions on the main process.
When the process receives death signal(SIGTERM, SIGINT), this termination handler will
be invoked. It raises the ``SignalException`` exception that should be processed by the
    user code. Python does not terminate the process after the termination handler is finished,
    so the exception should not be silently ignored; otherwise the process will never
be terminated.
"""
sigval = signal.Signals(signum)
raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
def _get_kill_signal() -> signal.Signals:
"""
Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows.
"""
if IS_WINDOWS:
return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821
else:
return signal.SIGKILL
def _get_default_signal() -> signal.Signals:
"""
Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.
"""
if IS_WINDOWS:
return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821
else:
return signal.SIGTERM
def _validate_full_rank(d: Dict[int, Any], nprocs: int, what: str):
actual_keys = set(d.keys())
expected_keys = set(range(nprocs))
if actual_keys != expected_keys:
raise RuntimeError(
f"{what}, local rank mapping mismatch,"
f" expected: {expected_keys}, actual: {actual_keys}"
)
_MAPPING_REGEX = r"^(\d:[0123],)*(\d:[0123])$"
_VALUE_REGEX = r"^[0123]$"
class Std(IntFlag):
NONE = 0
OUT = 1
ERR = 2
ALL = OUT | ERR
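    # Std is a bitmask; e.g. ``(Std.ALL & Std.ERR) == Std.ERR`` checks whether
    # stderr handling is requested, mirroring the checks in start_processes().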
@classmethod
def from_str(cls, vm: str) -> Union["Std", Dict[int, "Std"]]:
"""
Example:
::
from_str("0") -> Std.NONE
from_str("1") -> Std.OUT
from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR}
Any other input raises an exception
"""
def to_std(v):
v = int(v)
for s in Std:
if s == v:
return s
# return None -> should NEVER reach here since we regex check input
if re.match(_VALUE_REGEX, vm): # vm is a number (e.g. 0)
return to_std(vm)
elif re.match(_MAPPING_REGEX, vm): # vm is a mapping (e.g. 0:1,1:2)
d: Dict[int, Std] = {}
for m in vm.split(","):
i, v = m.split(":")
d[int(i)] = to_std(v)
return d
else:
raise ValueError(
f"{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>"
)
def to_map(
val_or_map: Union[Std, Dict[int, Std]], local_world_size: int
) -> Dict[int, Std]:
"""
Certain APIs take redirect settings either as a single value (e.g. apply to all
    local ranks) or as an explicit user-provided mapping. This is a convenience
    method that converts a single value or a partial mapping into a full mapping.
Example:
::
to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT}
to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT}
to_map({0: Std.OUT, 1: Std.OUT}, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT}
"""
if isinstance(val_or_map, Std):
return {i: val_or_map for i in range(local_world_size)}
else:
map = {}
for i in range(local_world_size):
map[i] = val_or_map.get(i, Std.NONE)
return map
@dataclass
class RunProcsResult:
"""
Results of a completed run of processes started with ``start_processes()``.
Returned by ``PContext``.
Note the following:
1. All fields are mapped by local rank
2. ``return_values`` - only populated for functions (not the binaries).
3. ``stdouts`` - path to stdout.log (empty string if no redirect)
4. ``stderrs`` - path to stderr.log (empty string if no redirect)
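    Example::

        # A hedged sketch; ``pc`` is assumed to be a PContext returned by
        # start_processes().
        result = pc.wait()
        if result is not None and not result.is_failed():
            print(result.return_values)  # {local_rank: return value of the fn}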
"""
return_values: Dict[int, Any] = field(default_factory=dict)
failures: Dict[int, ProcessFailure] = field(default_factory=dict)
stdouts: Dict[int, str] = field(default_factory=dict)
stderrs: Dict[int, str] = field(default_factory=dict)
def is_failed(self) -> bool:
return len(self.failures) > 0
class PContext(abc.ABC):
"""
The base class that standardizes operations over a set of processes
that are launched via different mechanisms. The name ``PContext``
    is intentional, to disambiguate it from ``torch.multiprocessing.ProcessContext``.
.. warning:: stdouts and stderrs should ALWAYS be a superset of
                 tee_stdouts and tee_stderrs (respectively); this is because
tee is implemented as a redirect + tail -f <stdout/stderr.log>
"""
def __init__(
self,
name: str,
entrypoint: Union[Callable, str],
args: Dict[int, Tuple],
envs: Dict[int, Dict[str, str]],
stdouts: Dict[int, str],
stderrs: Dict[int, str],
tee_stdouts: Dict[int, str],
tee_stderrs: Dict[int, str],
error_files: Dict[int, str],
):
self.name = name
# validate that all mappings have the same number of keys and
# all local ranks are accounted for
nprocs = len(args)
_validate_full_rank(stdouts, nprocs, "stdouts")
_validate_full_rank(stderrs, nprocs, "stderrs")
self.entrypoint = entrypoint
self.args = args
self.envs = envs
self.stdouts = stdouts
self.stderrs = stderrs
self.error_files = error_files
self.nprocs = nprocs
self._stdout_tail = TailLog(name, tee_stdouts, sys.stdout)
self._stderr_tail = TailLog(name, tee_stderrs, sys.stderr)
def start(self) -> None:
"""
Start processes using parameters defined in the constructor.
"""
signal.signal(signal.SIGTERM, _terminate_process_handler)
signal.signal(signal.SIGINT, _terminate_process_handler)
if not IS_WINDOWS:
signal.signal(signal.SIGHUP, _terminate_process_handler)
signal.signal(signal.SIGQUIT, _terminate_process_handler)
self._start()
self._stdout_tail.start()
self._stderr_tail.start()
@abc.abstractmethod
def _start(self) -> None:
"""
Start processes using strategy defined in a particular context.
"""
raise NotImplementedError()
@abc.abstractmethod
def _poll(self) -> Optional[RunProcsResult]:
"""
Polls the run status of the processes running under this context.
This method follows an "all-or-nothing" policy and returns
        a ``RunProcsResult`` object if either all processes complete
successfully or any process fails. Returns ``None`` if
all processes are still running.
"""
raise NotImplementedError()
def wait(self, timeout: float = -1, period: float = 1) -> Optional[RunProcsResult]:
"""
Waits for the specified ``timeout`` seconds, polling every ``period`` seconds
for the processes to be done. Returns ``None`` if the processes are still running
on timeout expiry. Negative timeout values are interpreted as "wait-forever".
A timeout value of zero simply queries the status of the processes (e.g. equivalent
to a poll).
        .. note:: The multiprocessing library registers SIGTERM and SIGINT signal handlers that raise
            ``SignalException`` when the signals are received. It is up to the consumer of the code
            to properly handle the exception. It is important not to swallow the exception,
            otherwise the process will not terminate. An example of the typical workflow:
.. code-block:: python
pc = start_processes(...)
try:
pc.wait(1)
.. do some other work
except SignalException as e:
pc.shutdown(e.sigval, timeout=30)
If SIGTERM or SIGINT occurs, the code above will try to shutdown child processes by propagating
received signal. If child processes will not terminate in the timeout time, the process will send
the SIGKILL.
"""
if timeout == 0:
return self._poll()
if timeout < 0:
timeout = sys.maxsize
expiry = time.time() + timeout
while time.time() < expiry:
pr = self._poll()
if pr:
return pr
time.sleep(period)
return None
@abc.abstractmethod
def pids(self) -> Dict[int, int]:
"""
Returns pids of processes mapped by their respective local_ranks
"""
raise NotImplementedError()
@abc.abstractmethod
def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
r"""
Terminates all processes managed by this context and cleans up any
meta resources (e.g. redirect, error_file files).
"""
raise NotImplementedError()
def close(
self, death_sig: Optional[signal.Signals] = None, timeout: int = 30
) -> None:
r"""
Terminates all processes managed by this context and cleans up any
meta resources (e.g. redirect, error_file files).
Args:
            death_sig: Death signal to terminate processes.
timeout: Time to wait for processes to finish, if process is
still alive after this time, it will be terminated via SIGKILL.
"""
if not death_sig:
death_sig = _get_default_signal()
self._close(death_sig=death_sig, timeout=timeout)
if self._stdout_tail:
self._stdout_tail.stop()
if self._stderr_tail:
self._stderr_tail.stop()
def get_std_cm(std_rd: str, redirect_fn):
if IS_WINDOWS or IS_MACOS or not std_rd:
return nullcontext()
else:
return redirect_fn(std_rd)
def _wrap(
local_rank: int,
fn: Callable,
args: Dict[int, Tuple],
envs: Dict[int, Dict[str, str]],
stdout_redirects: Dict[int, str], # redirect file for stdout (to console if None)
stderr_redirects: Dict[int, str], # redirect file for stderr (to console if None)
ret_vals: Dict[int, mp.SimpleQueue],
queue_finished_reading_event: synchronize.Event,
) -> None:
# get the per-rank params up front so we fail fast if no mapping is found
args_ = args[local_rank]
env_ = envs[local_rank]
ret_val_ = ret_vals[local_rank]
stdout_rd = stdout_redirects[local_rank]
stderr_rd = stderr_redirects[local_rank]
stdout_cm = get_std_cm(stdout_rd, redirect_stdout)
stderr_cm = get_std_cm(stderr_rd, redirect_stderr)
for k, v in env_.items():
os.environ[k] = v
with stdout_cm, stderr_cm:
ret = record(fn)(*args_)
ret_val_.put(ret)
queue_finished_reading_event.wait()
class MultiprocessContext(PContext):
"""
``PContext`` holding worker processes invoked as a function.
"""
def __init__(
self,
name: str,
entrypoint: Callable,
args: Dict[int, Tuple],
envs: Dict[int, Dict[str, str]],
stdouts: Dict[int, str],
stderrs: Dict[int, str],
tee_stdouts: Dict[int, str],
tee_stderrs: Dict[int, str],
error_files: Dict[int, str],
start_method: str,
):
super().__init__(
name,
entrypoint,
args,
envs,
stdouts,
stderrs,
tee_stdouts,
tee_stderrs,
error_files,
)
self.start_method = start_method
# each ret_val queue will always contain a single element.
self._ret_vals = {
local_rank: mp.get_context(self.start_method).SimpleQueue()
for local_rank in range(self.nprocs)
}
# see comments in ``join()`` for what this is
self._return_values: Dict[int, Any] = {}
self._pc: Optional[mp.ProcessContext] = None
        # Note: the set() method should ONLY be invoked when all processes have finished
        # successfully. If any process died on event.wait(), calling the set() method will deadlock.
self._worker_finished_event = mp.get_context(self.start_method).Event()
def _start(self):
if self._pc:
raise ValueError(
"The process context already initialized."
" Most likely the start method got called twice."
)
self._pc = mp.start_processes(
fn=_wrap,
args=(
self.entrypoint,
self.args,
self.envs,
self.stdouts,
self.stderrs,
self._ret_vals,
self._worker_finished_event,
),
nprocs=self.nprocs,
join=False,
daemon=False,
start_method=self.start_method,
)
def _is_done(self) -> bool:
return len(self._return_values) == self.nprocs
def _poll(self) -> Optional[RunProcsResult]:
assert self._pc is not None # assertion for mypy type checker
try:
            # torch.mp.ProcessContext throws an Exception if some/all of the
            # worker processes failed
            # timeout < 0 checks worker status and returns immediately
# Join will never return success since we use synchronize.Event to wait
# for all processes to finish.
self._pc.join(-1)
# IMPORTANT: we use multiprocessing.Queue to carry worker return values
# back to the parent, the worker process will wait before terminating
# until all the buffered items are fed by the feeder thread to the underlying
# pipe. Hence to prevent deadlocks on large return values,
# we opportunistically try queue.get on each join call
# See: https://docs.python.org/2/library/multiprocessing.html#all-platforms
for local_rank in range(0, self.nprocs):
return_queue = self._ret_vals[local_rank]
if not return_queue.empty():
# save the return values temporarily into a member var
self._return_values[local_rank] = return_queue.get()
if self._is_done():
# we should ALWAYS have ALL the return values when all the processes are done
self._worker_finished_event.set()
                # Wait until all processes are finished. At this point the workers have finished
                # executing the user function.
self._pc.join()
_validate_full_rank(
self._return_values, self.nprocs, "return_value queue"
)
self.close()
return RunProcsResult(
return_values=self._return_values,
stdouts=self.stdouts,
stderrs=self.stderrs,
)
else:
return None
except (mp.ProcessRaisedException, mp.ProcessExitedException) as e:
failed_local_rank = e.error_index
# entrypoint for MultiprocessContext will always be a Callable
fn_name = self.entrypoint.__qualname__ # type: ignore[union-attr]
failed_proc = self._pc.processes[failed_local_rank]
error_filepath = self.error_files[failed_local_rank]
log.error(
f"failed (exitcode: {failed_proc.exitcode})"
f" local_rank: {failed_local_rank} (pid: {e.pid})"
f" of fn: {fn_name} (start_method: {self.start_method})",
exc_info=True,
)
self.close()
return RunProcsResult(
failures={
failed_local_rank: ProcessFailure(
local_rank=failed_local_rank,
pid=e.pid,
exitcode=failed_proc.exitcode,
error_file=error_filepath,
)
},
stdouts=self.stdouts,
stderrs=self.stderrs,
)
def pids(self) -> Dict[int, int]:
assert self._pc is not None # assertion for mypy type checking
return {local_rank: pid for local_rank, pid in enumerate(self._pc.pids())}
def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
if not self._pc:
return
for proc in self._pc.processes:
if proc.is_alive():
log.warning(f"Closing process {proc.pid} via signal {death_sig.name}")
try:
os.kill(proc.pid, death_sig)
except ProcessLookupError:
                    # If the process has already exited for some reason,
                    # `ProcessLookupError` will be raised; it is safe to ignore it.
pass
end = time.monotonic() + timeout
for proc in self._pc.processes:
time_to_wait = end - time.monotonic()
if time_to_wait <= 0:
break
proc.join(time_to_wait)
for proc in self._pc.processes:
if proc.is_alive():
log.warning(
f"Unable to shutdown process {proc.pid} via {death_sig}, forcefully exitting via {_get_kill_signal()}"
)
try:
os.kill(proc.pid, _get_kill_signal())
except ProcessLookupError:
                    # If the process has already exited for some reason,
                    # `ProcessLookupError` will be raised; it is safe to ignore it.
pass
proc.join()
class SubprocessHandler:
"""
Convenience wrapper around python's ``subprocess.Popen``. Keeps track of
    meta-objects associated with the process (e.g. stdout and stderr redirect fds).
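    Example::

        # A hedged sketch; the binary, arguments and log paths are illustrative.
        handler = SubprocessHandler(
            entrypoint="/bin/echo",
            args=("hello",),
            env={},
            stdout="/tmp/echo_stdout.log",
            stderr="/tmp/echo_stderr.log",
        )
        handler.proc.wait()
        handler.close()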
"""
def __init__(
self,
entrypoint: str,
args: Tuple,
env: Dict[str, str],
stdout: str,
stderr: str,
):
self._stdout = open(stdout, "w") if stdout else None
self._stderr = open(stderr, "w") if stderr else None
# inherit parent environment vars
env_vars = os.environ.copy()
env_vars.update(env)
args_str = (entrypoint, *[str(e) for e in args])
self.proc: subprocess.Popen = self._popen(args_str, env_vars)
def _popen(self, args: Tuple, env: Dict[str, str]) -> subprocess.Popen:
return subprocess.Popen(
# pyre-fixme[6]: Expected `Union[typing.Sequence[Union[_PathLike[bytes],
# _PathLike[str], bytes, str]], bytes, str]` for 1st param but got
# `Tuple[str, *Tuple[Any, ...]]`.
args=args,
env=env,
stdout=self._stdout,
stderr=self._stderr,
)
def close(self, death_sig: Optional[signal.Signals] = None) -> None:
if not death_sig:
death_sig = _get_default_signal()
self.proc.send_signal(death_sig)
if self._stdout:
self._stdout.close()
if self._stderr:
self._stderr.close()
class SubprocessContext(PContext):
"""
``PContext`` holding worker processes invoked as a binary.
"""
def __init__(
self,
name: str,
entrypoint: str,
args: Dict[int, Tuple],
envs: Dict[int, Dict[str, str]],
stdouts: Dict[int, str],
stderrs: Dict[int, str],
tee_stdouts: Dict[int, str],
tee_stderrs: Dict[int, str],
error_files: Dict[int, str],
):
super().__init__(
name,
entrypoint,
args,
envs,
stdouts,
stderrs,
tee_stdouts,
tee_stderrs,
error_files,
)
# state vector; tracks which local ranks have not yet finished
self._running_local_ranks: Set[int] = set(range(self.nprocs))
self._failures: Dict[int, ProcessFailure] = {}
self.subprocess_handlers: Dict[int, SubprocessHandler] = {}
def _start(self):
if self.subprocess_handlers:
raise ValueError(
"The subprocess handlers already initialized. Most likely the start method got called twice."
)
self.subprocess_handlers = {
local_rank: SubprocessHandler(
entrypoint=self.entrypoint, # type: ignore[arg-type] # entrypoint is always a str
args=self.args[local_rank],
env=self.envs[local_rank],
stdout=self.stdouts[local_rank],
stderr=self.stderrs[local_rank],
)
for local_rank in range(self.nprocs)
}
def _poll(self) -> Optional[RunProcsResult]:
done_local_ranks = set()
for local_rank in self._running_local_ranks:
handler = self.subprocess_handlers[local_rank]
exitcode = handler.proc.poll()
if exitcode is not None:
done_local_ranks.add(local_rank)
if exitcode != 0: # failed or signaled
self._failures[local_rank] = ProcessFailure(
local_rank=local_rank,
pid=handler.proc.pid,
exitcode=exitcode,
error_file=self.error_files[local_rank],
)
# else: --> succeeded; nothing to do
self._running_local_ranks.difference_update(done_local_ranks)
# if ALL procs are finished or ANY have failed
if not self._running_local_ranks or self._failures:
self.close() # terminate all running procs
result = RunProcsResult(
failures=self._failures,
stdouts=self.stdouts,
stderrs=self.stderrs,
)
if result.is_failed():
first_failure = min(result.failures.values(), key=lambda f: f.timestamp)
log.error(
f"failed (exitcode: {first_failure.exitcode})"
f" local_rank: {first_failure.local_rank} (pid: {first_failure.pid})"
f" of binary: {self.entrypoint}"
)
else:
# Populate return_values with dummy values. This provides consistency with MultiprocessContext
result.return_values = {
local_rank: None for local_rank in range(self.nprocs)
}
return result
else: # there are no failures and procs still running
return None
def pids(self) -> Dict[int, int]:
return {
local_rank: sh.proc.pid
for local_rank, sh in self.subprocess_handlers.items()
}
def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
if not self.subprocess_handlers:
return
for handler in self.subprocess_handlers.values():
if handler.proc.poll() is None:
log.warning(
f"Sending process {handler.proc.pid} closing signal {death_sig.name}"
)
handler.close(death_sig=death_sig)
end = time.monotonic() + timeout
for handler in self.subprocess_handlers.values():
time_to_wait = end - time.monotonic()
if time_to_wait <= 0:
break
try:
handler.proc.wait(time_to_wait)
except subprocess.TimeoutExpired:
# Ignore the timeout expired exception, since
# the child process will be forcefully terminated via SIGKILL
pass
for handler in self.subprocess_handlers.values():
if handler.proc.poll() is None:
log.warning(
f"Unable to shutdown process {handler.proc.pid} via {death_sig}, forcefully exitting via {_get_kill_signal()}"
)
handler.close(death_sig=_get_kill_signal())
handler.proc.wait()
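# Illustrative usage sketch: launching copies of a binary through the public
# ``start_processes`` helper, which returns a ``SubprocessContext`` when the
# entrypoint is a string. Assumes a POSIX system with ``echo`` on PATH; the log
# directory is a throwaway temp directory created just for this example.
if __name__ == "__main__":
    import tempfile
    from torch.distributed.elastic.multiprocessing import start_processes

    ctx = start_processes(
        name="echo",
        entrypoint="echo",
        args={0: ("hello from rank 0",), 1: ("hello from rank 1",)},
        envs={0: {}, 1: {}},
        log_dir=tempfile.mkdtemp(prefix="echo_logs_"),
    )
    result = ctx.wait()  # blocks until both ranks exit
    print("failures:", result.failures)  # empty dict when both ranks exit 0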
| pytorch-master | torch/distributed/elastic/multiprocessing/api.py |
# !/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Taken and modified from original source:
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
import ctypes
import logging
import os
import sys
from contextlib import contextmanager
from functools import partial
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
logger = logging.getLogger(__name__)
def get_libc():
if IS_WINDOWS or IS_MACOS:
logger.warning(
"NOTE: Redirects are currently not supported in Windows or MacOs."
)
return None
else:
return ctypes.CDLL("libc.so.6")
libc = get_libc()
def _c_std(stream: str):
return ctypes.c_void_p.in_dll(libc, stream)
def _python_std(stream: str):
return {"stdout": sys.stdout, "stderr": sys.stderr}[stream]
_VALID_STD = {"stdout", "stderr"}
@contextmanager
def redirect(std: str, to_file: str):
"""
Redirects ``std`` (one of ``"stdout"`` or ``"stderr"``) to a file
in the path specified by ``to_file``. This method redirects the
underlying std file descriptor (not just python's ``sys.stdout|stderr``).
See usage for details.
Directory of ``to_file`` is assumed to exist and the destination file
is overwritten if it already exists.
.. note:: Due to buffering, cross-source writes are not guaranteed to
appear in wall-clock order. For instance in the example below
it is possible for the C-outputs to appear before the python
outputs in the log file.
Usage:
::
# syntactic-sugar for redirect("stdout", "/tmp/stdout.log")
with redirect_stdout("/tmp/stdout.log"):
print("python stdouts are redirected")
libc = ctypes.CDLL("libc.so.6")
libc.printf(b"c stdouts are also redirected"
os.system("echo system stdouts are also redirected")
print("stdout restored")
"""
if std not in _VALID_STD:
raise ValueError(
f"unknown standard stream <{std}>, must be one of {_VALID_STD}"
)
c_std = _c_std(std)
python_std = _python_std(std)
std_fd = python_std.fileno()
def _redirect(dst):
libc.fflush(c_std)
python_std.flush()
os.dup2(dst.fileno(), std_fd)
with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode="w+b") as dst:
_redirect(dst)
yield
_redirect(orig_std)
redirect_stdout = partial(redirect, "stdout")
redirect_stderr = partial(redirect, "stderr")
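# Illustrative usage sketch: capturing Python-level and shell-level stderr in a
# single log file. Assumes Linux (libc.so.6), since the libc-based redirection
# above is not supported on Windows or macOS; the log path is a temp file
# created only for this example.
if __name__ == "__main__":
    import tempfile

    log_file = tempfile.NamedTemporaryFile(suffix=".log", delete=False).name
    with redirect_stderr(log_file):
        print("python stderr is redirected", file=sys.stderr)
        os.system("echo 'shell stderr is redirected too' 1>&2")
    with open(log_file) as fp:
        print("captured:", fp.read())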
| pytorch-master | torch/distributed/elastic/multiprocessing/redirects.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Multiprocessing error-reporting module
from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler
__all__ = ['get_error_handler']
def get_error_handler():
return ErrorHandler()
| pytorch-master | torch/distributed/elastic/multiprocessing/errors/handlers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Each host in a distributed PyTorch job runs with a single TorchElastic agent,
and multiple workers (as children processes of the TorchElastic agent).
Since the workers are user-provided (your PyTorch script/job), TorchElastic
has a way to propagate errors on the trainers through the agent and up to the
scheduler, which ultimately informs the end-user about the state of the job
and applies any retry policies.
TorchElastic categorizes errors into 3 categories:
+----------------+----------------+--------------------------------------------------------------+
| Category | Sub-Category | Description |
+================+================+==============================================================+
| User Error | Input Error | invalid inputs to TorchElastic APIs (e.g. min > max nodes) |
| +----------------+--------------------------------------------------------------+
| | Worker Failure | any failures on the worker child process |
+----------------+----------------+--------------------------------------------------------------+
| Platform Error | n/a | failures caused by the agent |
+----------------+----------------+--------------------------------------------------------------+
| Infra Error | n/a | failures outside the domain of the agent and workers |
| | | (e.g. host failures) |
+----------------+----------------+--------------------------------------------------------------+
All errors other than "Worker Failure" are either raised canonically from the
agent process or implicitly or explicitly crash the agent process. So the
standard Python exception handling strategies apply.
Worker Failures are special because the exception/failure originates on a different
process from the agent so the error needs to be propagated inter-process
(e.g. the agent cannot simply ``try-catch`` an exception raised on the worker process).
TorchElastic agents use :func:`torch.distributed.elastic.multiprocessing.start_processes`
to launch the workers which has a simple file based inter-process error propagation
built-in.
Any function or binary entrypoint decorated with :func:`record`
will write uncaught exceptions (with the trace information) to a file specified by the
environment variable ``TORCHELASTIC_ERROR_FILE``. The parent process (e.g. agent)
sets this env var on each child it launches, then aggregates the error files for all
children, and propagates the one with the **smallest** timestamp (e.g. the **first** error).
"""
import json
import os
import signal
import socket
import time
import warnings
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
from string import Template
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar
from torch.distributed.elastic.utils.logging import get_logger
from .error_handler import ErrorHandler # noqa: F401
from .handlers import get_error_handler # noqa: F401
log = get_logger()
JSON = Dict
_EMPTY_ERROR_DATA = {"message": "<NONE>"}
_NOT_AVAILABLE = "<N/A>"
T = TypeVar("T")
@dataclass
class ProcessFailure:
"""
Represents the failed process result. When the worker process fails,
it may record the failure root cause into a file.
Tries to read the failure timestamp from the provided ``error_file``,
if the ``error_file`` does not exist, the timestamp is the current
timestamp (seconds since epoch).
The ``message`` field is a concise explanation of the failure. If
the error file exists then the message is obtained from the error file.
Otherwise one is generated based on the failure signature.
.. note:: It is assumed that the ``error_file`` is written by
``torch.distributed.elastic.multiprocessing.errors.error_handler.ErrorHandler``.
Otherwise the behavior is undefined.
"""
local_rank: int
pid: int
exitcode: int
error_file: str
error_file_data: JSON = field(init=False)
message: str = field(init=False)
timestamp: int = field(init=False)
def __post_init__(self):
self.error_file_data = _EMPTY_ERROR_DATA
if os.path.isfile(self.error_file):
try:
with open(self.error_file, "r") as fp:
self.error_file_data = json.load(fp)
log.debug(
f"User process failed with error data: {json.dumps(self.error_file_data, indent=2)}"
)
self.message, self.timestamp = self._get_error_data(
self.error_file_data
)
except Exception:
log.exception(f"Failed to parse reply file: {self.error_file}")
raise
else:
self._set_no_reply_file()
# make up an informative message if not already present
if not self.message:
# signals typically do not generate an error file message
if self.exitcode < 0:
self.message = (
f"Signal {-self.exitcode} ({self.signal_name()})"
f" received by PID {self.pid}"
)
else:
self.message = "To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html"
def _get_error_data(self, error_file_data: Dict[str, Any]) -> Tuple[str, int]:
message = error_file_data["message"]
if isinstance(message, str):
timestamp = int(error_file_data.get("timestamp", 0))
else:
timestamp = int(message["extraInfo"]["timestamp"])
return (message, timestamp)
def _set_no_reply_file(self):
self.error_file = _NOT_AVAILABLE
self.error_file_data = _EMPTY_ERROR_DATA
self.message = ""
self.timestamp = int(time.time())
def signal_name(self) -> str:
if self.exitcode < 0:
return signal.Signals(-self.exitcode).name
else:
return _NOT_AVAILABLE
def timestamp_isoformat(self):
"""
Returns timestamp in ISO format (YYYY-MM-DD_HH:MM:SS)
"""
return datetime.fromtimestamp(self.timestamp).isoformat(sep="_")
GlobalRank = int
_FAILURE_FORMAT_TEMPLATE = """[${idx}]:
time : ${time}
host : ${hostname}
rank : ${rank} (local_rank: ${local_rank})
exitcode : ${exitcode} (pid: ${pid})
error_file: ${error_file}
traceback : ${message}"""
# extra new lines before and after are intentional
_MSG_FORMAT_TEMPLATE = """
${boarder}
${title}
${section}
Failures:
${other_failures}
${section}
Root Cause (first observed failure):
${root_failure}
${boarder}"""
class ChildFailedError(Exception):
"""
Special exception type that can be raised from a function annotated with the
``@record`` decorator to have the child process' (root exception) propagate
up the stack as-is (e.g. without being wrapped in the parent's traceback).
Useful in cases where the parent is a simple nanny process
and the child (worker) processes are actually doing meaningful compute.
In this case, errors typically occur on the child process as the parent
is not doing anything non-trivial, and child errors should be propagated
to the scheduler for accurate root cause diagnostics.
.. note:: The propagation relies on error files rather than exception handling to
support both function and binary launches.
Example:
::
# process tree on a host (container)
0: scheduler-init-process:
|- 1: torchelastic_agent:
|- 2: trainer_0 (ok)
|- 3: trainer_1 (fail) -> error.json
|- ...
|- n+2: trainer_n (ok)
|- n+3: other processes
|- ...
In the example above, trainer 1's failure (written into error.json) is
the root cause and should be reported to the scheduler's init process.
The torchelastic agent raises a ``ChildFailedError("trainer", {1: "trainer_1/error.json"})``
upon detecting trainer 1's failure which would propagate the contents
of trainer 1's error file to the scheduler's init process.
"""
def __init__(self, name: str, failures: Dict[GlobalRank, ProcessFailure]):
self.name = name
self.failures = failures
assert (
self.failures
) # does not make sense to create a ChildFailedError with no failures
super().__init__(self.format_msg())
def get_first_failure(self) -> Tuple[GlobalRank, ProcessFailure]:
rank = min(self.failures.keys(), key=lambda r: self.failures[r].timestamp)
return rank, self.failures[rank]
def format_msg(self, boarder_delim="=", section_delim="-"):
title = f"{self.name} FAILED"
root_rank, root_failure = self.get_first_failure()
root_failure_fmt: str = ""
other_failures_fmt: List[str] = []
width = len(title)
for idx, (rank, failure) in enumerate(self.failures.items()):
fmt, w = self._format_failure(idx, rank, failure)
width = max(width, w)
if rank == root_rank:
root_failure_fmt = fmt
else:
other_failures_fmt.append(fmt)
# upper boundary on width
width = min(width, 60)
return Template(_MSG_FORMAT_TEMPLATE).substitute(
boarder=boarder_delim * width,
title=title,
section=section_delim * width,
root_failure=root_failure_fmt,
other_failures="\n".join(other_failures_fmt or [" <NO_OTHER_FAILURES>"]),
)
def _format_failure(
self, idx: int, rank: int, failure: ProcessFailure
) -> Tuple[str, int]:
# failure.message is either a str (when the failure does not generate a traceback - e.g. signals)
# or a dict (json) of the form
# {"message": $ERROR_MSG, "extraInfo": {"py_callstack": $TRACEBACK, timestamp: $TS}}
# so the display logic is:
# 1. if failure.message is not a dict (it is a str) just show it as is
# 2. else try to get the traceback (py_callstack)
# 3. if the traceback is not there, use the message
# 4. if the message is not there show <N/A>
msg = failure.message
if isinstance(failure.message, dict):
msg = (
failure.message.get("extraInfo", {})
.get("py_callstack", failure.message.get("message", "<N/A>"))
.replace("\n", "\n ") # to properly indent the traceback
)
fmt = Template(_FAILURE_FORMAT_TEMPLATE).substitute(
idx=idx,
time=failure.timestamp_isoformat(),
hostname=socket.getfqdn(),
rank=rank,
local_rank=failure.local_rank,
exitcode=failure.exitcode,
pid=failure.pid,
error_file=failure.error_file,
message=msg,
)
width = 0
for line in fmt.split("\n"):
width = max(width, len(line))
return fmt, width
def record(
fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None
) -> Callable[..., T]:
"""
Syntactic sugar to record errors/exceptions that happened in the decorated
function using the provided ``error_handler``.
Using this decorator is equivalent to:
::
error_handler = get_error_handler()
error_handler.initialize()
try:
foobar()
except ChildFailedError as e:
_, failure = e.get_first_failure()
error_handler.dump_error_file(failure.error_file, failure.exitcode)
raise
except Exception as e:
error_handler.record(e)
raise
.. important:: use this decorator once per process at the top level method,
typically this is the main method.
Example
::
@record
def main():
pass
if __name__=="__main__":
main()
"""
if not error_handler:
error_handler = get_error_handler()
def wrap(f):
@wraps(f)
def wrapper(*args, **kwargs):
assert error_handler is not None # assertion for mypy type checker
error_handler.initialize()
try:
return f(*args, **kwargs)
except ChildFailedError as e:
rank, failure = e.get_first_failure()
if failure.error_file != _NOT_AVAILABLE:
error_handler.dump_error_file(failure.error_file, failure.exitcode)
else:
log.info(
(
f"local_rank {rank} FAILED with no error file."
f" Decorate your entrypoint fn with @record for traceback info."
f" See: https://pytorch.org/docs/stable/elastic/errors.html"
)
)
raise
except Exception as e:
error_handler.record_exception(e)
raise
return wrapper
return wrap(fn)
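# Illustrative usage sketch: wrapping an entrypoint with ``@record`` so that an
# uncaught exception is written to the file named by TORCHELASTIC_ERROR_FILE.
# The error-file path used here is an arbitrary temp location, not a required one.
if __name__ == "__main__":
    import tempfile

    os.environ["TORCHELASTIC_ERROR_FILE"] = os.path.join(
        tempfile.mkdtemp(), "error.json"
    )

    @record
    def main():
        raise RuntimeError("boom")  # uncaught -> recorded before re-raising

    try:
        main()
    except RuntimeError:
        with open(os.environ["TORCHELASTIC_ERROR_FILE"]) as fp:
            print(json.dumps(json.load(fp), indent=2))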
| pytorch-master | torch/distributed/elastic/multiprocessing/errors/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import faulthandler
import json
import logging
import os
import time
import traceback
import warnings
from typing import Any, Dict, Optional
__all__ = ['ErrorHandler']
log = logging.getLogger(__name__)
class ErrorHandler:
"""
Writes the provided exception object along with some other metadata about
the error in a structured way in JSON format to an error file specified by the
environment variable: ``TORCHELASTIC_ERROR_FILE``. If this environment
variable is not set, then simply logs the contents of what would have been
written to the error file.
This handler may be subclassed to customize the handling of the error.
Subclasses should override ``initialize()`` and ``record_exception()``.
"""
def _get_error_file_path(self) -> Optional[str]:
"""
Returns the error file path. May return ``None`` to have the
structured error be logged only.
"""
return os.environ.get("TORCHELASTIC_ERROR_FILE", None)
def initialize(self) -> None:
"""
Called prior to running code that we wish to capture errors/exceptions.
Typically registers signal/fault handlers. Users can override this
function to add custom initialization/registrations that aid in
propagation/information of errors/signals/exceptions/faults.
"""
try:
faulthandler.enable(all_threads=True)
except Exception as e:
warnings.warn(f"Unable to enable fault handler. {type(e).__name__}: {e}")
def _write_error_file(self, file_path: str, error_msg: str) -> None:
"""
Writes error message to the file.
"""
try:
with open(file_path, "w") as fp:
fp.write(error_msg)
except Exception as e:
warnings.warn(f"Unable to write error to file. {type(e).__name__}: {e}")
def record_exception(self, e: BaseException) -> None:
"""
Writes a structured information about the exception into an error file in
JSON format. If the error file cannot be determined, then logs the content
that would have been written to the error file.
"""
file = self._get_error_file_path()
if file:
data = {
"message": {
"message": f"{type(e).__name__}: {e}",
"extraInfo": {
"py_callstack": traceback.format_exc(),
"timestamp": str(int(time.time())),
},
}
}
with open(file, "w") as fp:
json.dump(data, fp)
def override_error_code_in_rootcause_data(
self,
rootcause_error_file: str,
rootcause_error: Dict[str, Any],
error_code: int = 0,
):
"""
Modify the rootcause_error read from the file, to correctly set the exit code.
"""
if "message" not in rootcause_error:
log.warning(
f"child error file ({rootcause_error_file}) does not have field `message`. \n"
f"cannot override error code: {error_code}"
)
elif isinstance(rootcause_error["message"], str):
log.warning(
f"child error file ({rootcause_error_file}) has a new message format. \n"
f"skipping error code override"
)
else:
rootcause_error["message"]["errorCode"] = error_code
def dump_error_file(self, rootcause_error_file: str, error_code: int = 0):
"""
Dumps parent error file from child process's root cause error and error code.
"""
with open(rootcause_error_file, "r") as fp:
rootcause_error = json.load(fp)
# Override error code since the child process cannot capture the error code if it
# is terminated by signals like SIGSEGV.
if error_code:
self.override_error_code_in_rootcause_data(rootcause_error_file, rootcause_error, error_code)
log.debug(
f"child error file ({rootcause_error_file}) contents:\n"
f"{json.dumps(rootcause_error, indent=2)}"
)
my_error_file = self._get_error_file_path()
if my_error_file:
# Guard against existing error files
# This can happen when the child is created using multiprocessing
# and the same env var (TORCHELASTIC_ERROR_FILE) is used on the
# parent and child to specify the error files (respectively)
# because the env vars on the child is set in the wrapper function
# and by default the child inherits the parent's env vars, if the child
# process receives a signal before the wrapper function kicks in
# and the signal handler writes to the error file, then the child
# will write to the parent's error file. In this case just log the
# original error file contents and overwrite the error file.
self._rm(my_error_file)
self._write_error_file(my_error_file, json.dumps(rootcause_error))
log.info(f"dumped error file to parent's {my_error_file}")
else:
log.error(
f"no error file defined for parent, to copy child error file ({rootcause_error_file})"
)
def _rm(self, my_error_file):
if os.path.isfile(my_error_file):
# Log the contents of the original file.
with open(my_error_file, "r") as fp:
try:
original = json.dumps(json.load(fp), indent=2)
log.warning(
f"{my_error_file} already exists"
f" and will be overwritten."
f" Original contents:\n{original}"
)
except json.decoder.JSONDecodeError as err:
log.warning(
f"{my_error_file} already exists"
f" and will be overwritten."
f" Unable to load original contents:\n"
)
os.remove(my_error_file)
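# Illustrative usage sketch: recording an exception with ErrorHandler directly.
# The error-file location below is an arbitrary temp path chosen for the example.
if __name__ == "__main__":
    import tempfile

    error_file = os.path.join(tempfile.mkdtemp(), "error.json")
    os.environ["TORCHELASTIC_ERROR_FILE"] = error_file
    handler = ErrorHandler()
    handler.initialize()
    try:
        {}["missing"]
    except KeyError as exc:
        handler.record_exception(exc)
    with open(error_file) as fp:
        print(json.load(fp)["message"]["message"])  # -> "KeyError: 'missing'"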
| pytorch-master | torch/distributed/elastic/multiprocessing/errors/error_handler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import os
import signal
import time
from queue import Empty
from typing import Any, Dict, List, Set, Tuple
from .api import RequestQueue, TimerClient, TimerRequest, TimerServer
__all__ = ['LocalTimerClient', 'MultiprocessingRequestQueue', 'LocalTimerServer']
class LocalTimerClient(TimerClient):
"""
Client side of ``LocalTimerServer``. This client is meant to be used
on the same host that the ``LocalTimerServer`` is running on and uses
pid to uniquely identify a worker. This is particularly useful in situations
where one spawns a subprocess (trainer) per GPU on a host with multiple
GPU devices.
"""
def __init__(self, mp_queue):
super().__init__()
self._mp_queue = mp_queue
def acquire(self, scope_id, expiration_time):
pid = os.getpid()
acquire_request = TimerRequest(pid, scope_id, expiration_time)
self._mp_queue.put(acquire_request)
def release(self, scope_id):
pid = os.getpid()
release_request = TimerRequest(pid, scope_id, -1)
self._mp_queue.put(release_request)
class MultiprocessingRequestQueue(RequestQueue):
"""
A ``RequestQueue`` backed by python ``multiprocessing.Queue``
"""
def __init__(self, mp_queue: mp.Queue):
super().__init__()
self._mp_queue = mp_queue
def size(self) -> int:
return self._mp_queue.qsize()
def get(self, size, timeout: float) -> List[TimerRequest]:
requests = []
wait = timeout
for _ in range(0, size):
start = time.time()
try:
r = self._mp_queue.get(block=True, timeout=wait)
except Empty:
break
requests.append(r)
wait = wait - (time.time() - start)
if wait <= 0:
break
return requests
class LocalTimerServer(TimerServer):
"""
Server that works with ``LocalTimerClient``. Clients are expected to be
subprocesses to the parent process that is running this server. Each host
in the job is expected to start its own timer server locally and each
server instance manages timers for local workers (running on processes
on the same host).
"""
def __init__(
self, mp_queue: mp.Queue, max_interval: float = 60, daemon: bool = True
):
super().__init__(MultiprocessingRequestQueue(mp_queue), max_interval, daemon)
self._timers: Dict[Tuple[Any, str], TimerRequest] = {}
def register_timers(self, timer_requests: List[TimerRequest]) -> None:
for request in timer_requests:
pid = request.worker_id
scope_id = request.scope_id
expiration_time = request.expiration_time
# negative expiration is a proxy for a release call
if expiration_time < 0:
self._timers.pop((pid, scope_id), None)
else:
self._timers[(pid, scope_id)] = request
def clear_timers(self, worker_ids: Set[int]) -> None:
for (pid, scope_id) in list(self._timers.keys()):
if pid in worker_ids:
self._timers.pop((pid, scope_id))
def get_expired_timers(self, deadline: float) -> Dict[Any, List[TimerRequest]]:
# pid -> [timer_requests...]
expired_timers: Dict[Any, List[TimerRequest]] = {}
for request in self._timers.values():
if request.expiration_time <= deadline:
expired_scopes = expired_timers.setdefault(request.worker_id, [])
expired_scopes.append(request)
return expired_timers
def _reap_worker(self, worker_id: int) -> bool:
try:
os.kill(worker_id, signal.SIGKILL)
return True
except ProcessLookupError:
logging.info(f"Process with pid={worker_id} does not exist. Skipping")
return True
except Exception as e:
logging.error(f"Error terminating pid={worker_id}", exc_info=e)
return False
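# Illustrative usage sketch: wiring a LocalTimerServer and LocalTimerClient
# together in a single process. In real deployments the client side runs inside
# worker subprocesses spawned by the agent; the 5-second expiry is arbitrary.
if __name__ == "__main__":
    from torch.distributed.elastic.timer import configure, expires

    mp_queue = mp.get_context("spawn").Queue()
    server = LocalTimerServer(mp_queue, max_interval=0.01)
    server.start()  # non-blocking; runs the watchdog in a daemon thread
    configure(LocalTimerClient(mp_queue))
    with expires(after=5):  # this process becomes reapable if the block exceeds 5s
        time.sleep(0.1)
    server.stop()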
| pytorch-master | torch/distributed/elastic/timer/local_timer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Expiration timers are set up on the same process as the agent and
used from your script to deal with stuck workers. When you go into
a code-block that has the potential to get stuck you can acquire
an expiration timer, which instructs the timer server to kill the
process if it does not release the timer by the self-imposed expiration
deadline.
Usage::
import torchelastic.timer as timer
import torchelastic.agent.server as agent
def main():
start_method = "spawn"
message_queue = mp.get_context(start_method).Queue()
server = timer.LocalTimerServer(message_queue, max_interval=0.01)
server.start() # non-blocking
spec = WorkerSpec(
fn=trainer_func,
args=(message_queue,),
...<OTHER_PARAMS...>)
agent = agent.LocalElasticAgent(spec, start_method)
agent.run()
def trainer_func(message_queue):
timer.configure(timer.LocalTimerClient(message_queue))
with timer.expires(after=60): # 60 second expiry
# do some work
In the example above if ``trainer_func`` takes more than 60 seconds to
complete, then the worker process is killed and the agent retries the worker group.
"""
from .api import TimerClient, TimerRequest, TimerServer, configure, expires # noqa: F401
from .local_timer import LocalTimerClient, LocalTimerServer # noqa: F401
| pytorch-master | torch/distributed/elastic/timer/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import logging
import threading
import time
from contextlib import contextmanager
from inspect import getframeinfo, stack
from typing import Any, Dict, List, Optional, Set
__all__ = ['TimerRequest', 'TimerClient', 'RequestQueue', 'TimerServer', 'configure', 'expires']
class TimerRequest:
"""
Data object representing a countdown timer acquisition and release
that is used between the ``TimerClient`` and ``TimerServer``.
A negative ``expiration_time`` should be interpreted as a "release"
request.
.. note:: the type of ``worker_id`` is implementation specific.
It is whatever the TimerServer and TimerClient implementations
use to uniquely identify a worker.
"""
__slots__ = ["worker_id", "scope_id", "expiration_time"]
def __init__(self, worker_id: Any, scope_id: str, expiration_time: float):
self.worker_id = worker_id
self.scope_id = scope_id
self.expiration_time = expiration_time
def __eq__(self, other):
if isinstance(other, TimerRequest):
return (
self.worker_id == other.worker_id
and self.scope_id == other.scope_id
and self.expiration_time == other.expiration_time
)
return False
class TimerClient(abc.ABC):
"""
Client library to acquire and release countdown timers by communicating
with the TimerServer.
"""
@abc.abstractmethod
def acquire(self, scope_id: str, expiration_time: float) -> None:
"""
Acquires a timer for the worker that holds this client object
given the scope_id and expiration_time. Typically registers
the timer with the TimerServer.
"""
pass
@abc.abstractmethod
def release(self, scope_id: str):
"""
Releases the timer for the ``scope_id`` on the worker this
client represents. After this method is
called, the countdown timer on the scope is no longer in effect.
"""
pass
class RequestQueue(abc.ABC):
"""
Consumer queue holding timer acquisition/release requests
"""
@abc.abstractmethod
def size(self) -> int:
"""
Returns the size of the queue at the time this method is called.
Note that by the time ``get`` is called the size of the queue
may have increased. The size of the queue should not decrease
until the ``get`` method is called. That is, the following assertion
should hold:
size = q.size()
res = q.get(size, timeout=0)
assert size == len(res)
-- or --
size = q.size()
res = q.get(size * 2, timeout=1)
assert size <= len(res) <= size * 2
"""
pass
@abc.abstractmethod
def get(self, size: int, timeout: float) -> List[TimerRequest]:
"""
Gets up to ``size`` number of timer requests in a blocking fashion
(no more than ``timeout`` seconds).
"""
pass
class TimerServer(abc.ABC):
"""
Entity that monitors active timers and expires them
in a timely fashion. This server is responsible for
reaping workers that have expired timers.
"""
def __init__(
self, request_queue: RequestQueue, max_interval: float, daemon: bool = True
):
"""
:param request_queue: Consumer ``RequestQueue``
:param max_interval: max time (in seconds) to wait
for an item in the request_queue
:param daemon: whether to run the watchdog thread as a daemon
"""
super().__init__()
self._request_queue = request_queue
self._max_interval = max_interval
self._daemon = daemon
self._watchdog_thread: Optional[threading.Thread] = None
self._stop_signaled = False
@abc.abstractmethod
def register_timers(self, timer_requests: List[TimerRequest]) -> None:
"""
Processes the incoming timer requests and registers them with the server.
The timer request can either be an acquire-timer or a release-timer request.
Timer requests with a negative expiration_time should be interpreted
as a release-timer request.
"""
pass
@abc.abstractmethod
def clear_timers(self, worker_ids: Set[Any]) -> None:
"""
Clears all timers for the given ``worker_ids``.
"""
pass
@abc.abstractmethod
def get_expired_timers(self, deadline: float) -> Dict[str, List[TimerRequest]]:
"""
Returns all expired timers for each worker_id. An expired timer
is a timer for which the expiration_time is less than or equal to
the provided deadline.
"""
pass
@abc.abstractmethod
def _reap_worker(self, worker_id: Any) -> bool:
"""
Reaps the given worker. Returns True if the worker has been
successfully reaped, False otherwise. If any uncaught exception
is thrown from this method, the worker is considered reaped
and all associated timers will be removed.
"""
def _reap_worker_no_throw(self, worker_id: Any) -> bool:
"""
Wraps ``_reap_worker(worker_id)``, if an uncaught exception is
thrown, then it considers the worker as reaped.
"""
try:
return self._reap_worker(worker_id)
except Exception as e:
logging.error(
"Uncaught exception thrown from _reap_worker(), "
"check that the implementation correctly catches exceptions",
exc_info=e,
)
return True
def _watchdog_loop(self):
while not self._stop_signaled:
try:
self._run_watchdog()
except Exception as e:
logging.error("Error running watchdog", exc_info=e)
def _run_watchdog(self):
batch_size = max(1, self._request_queue.size())
timer_requests = self._request_queue.get(batch_size, self._max_interval)
self.register_timers(timer_requests)
now = time.time()
reaped_worker_ids = set()
for worker_id, expired_timers in self.get_expired_timers(now).items():
logging.info(
f"Reaping worker_id=[{worker_id}]."
f" Expired timers: {self._get_scopes(expired_timers)}"
)
if self._reap_worker_no_throw(worker_id):
logging.info(f"Successfully reaped worker=[{worker_id}]")
reaped_worker_ids.add(worker_id)
else:
logging.error(
f"Error reaping worker=[{worker_id}]. Will retry on next watchdog."
)
self.clear_timers(reaped_worker_ids)
def _get_scopes(self, timer_requests):
return [r.scope_id for r in timer_requests]
def start(self) -> None:
logging.info(
f"Starting {type(self).__name__}..."
f" max_interval={self._max_interval},"
f" daemon={self._daemon}"
)
self._watchdog_thread = threading.Thread(
target=self._watchdog_loop, daemon=self._daemon
)
logging.info("Starting watchdog thread...")
self._watchdog_thread.start()
def stop(self) -> None:
logging.info(f"Stopping {type(self).__name__}")
self._stop_signaled = True
if self._watchdog_thread:
logging.info("Stopping watchdog thread...")
self._watchdog_thread.join(self._max_interval)
self._watchdog_thread = None
else:
logging.info("No watchdog thread running, doing nothing")
_timer_client = None
def configure(timer_client: TimerClient):
"""
Configures a timer client. Must be called before using ``expires``.
"""
global _timer_client
_timer_client = timer_client
logging.info(f"Timer client configured to: {type(_timer_client).__name__}")
@contextmanager
def expires(
after: float, scope: Optional[str] = None, client: Optional[TimerClient] = None
):
"""
Acquires a countdown timer that expires in ``after`` seconds from now,
unless the code-block that it wraps is finished within the timeframe.
When the timer expires, this worker is eligible to be reaped. The
exact meaning of "reaped" depends on the client implementation. In
most cases, reaping means to terminate the worker process.
Note that the worker is NOT guaranteed to be reaped at exactly
``time.now() + after``, but rather the worker is "eligible" for being
reaped and the ``TimerServer`` that the client talks to will ultimately
make the decision when and how to reap the workers with expired timers.
Usage::
torch.distributed.elastic.timer.configure(LocalTimerClient())
with expires(after=10):
torch.distributed.all_reduce(...)
"""
if client is None:
if _timer_client is None:
raise RuntimeError("Configure timer client before using coundown timers.")
client = _timer_client
if scope is None:
# grab the caller file + lineno
caller = getframeinfo(stack()[1][0])
scope = f"{caller.filename}#{caller.lineno}"
expiration = time.time() + after
client.acquire(scope, expiration)
try:
yield
finally:
client.release(scope)
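# Illustrative usage sketch: a minimal no-op TimerClient (a hypothetical helper,
# not part of this API) used only to exercise ``configure`` and ``expires``
# without a running TimerServer.
if __name__ == "__main__":
    class _NoopTimerClient(TimerClient):
        def acquire(self, scope_id: str, expiration_time: float) -> None:
            logging.info("acquired scope=%s until=%s", scope_id, expiration_time)

        def release(self, scope_id: str) -> None:
            logging.info("released scope=%s", scope_id)

    configure(_NoopTimerClient())
    with expires(after=30, scope="demo"):
        pass  # work that is expected to finish within 30 seconds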
| pytorch-master | torch/distributed/elastic/timer/api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import timedelta
from typing import List
def get_all(store, rank: int, prefix: str, size: int):
r"""
Given a store and a prefix, the method goes through the array of keys
of the following format: ``{prefix}{idx}``, where idx is in a range
from 0 to size, and tries to retrieve the data.
The Rank0 process waits at the end to make sure all other processes
finished the procedure before exiting.
Usage
::
values = get_all(store, rank, 'torchelastic/data', 3)
value1 = values[0] # retrieves the data for key torchelastic/data0
value2 = values[1] # retrieves the data for key torchelastic/data1
value3 = values[2] # retrieves the data for key torchelastic/data2
"""
data_arr = []
for idx in range(size):
data = store.get(f"{prefix}{idx}")
data_arr.append(data)
store.set(f"{prefix}{rank}.FIN", b"FIN")
if rank == 0:
# Rank0 runs the TCPStore daemon, as a result it needs to exit last.
# Otherwise, the barrier may timeout if rank0 process finished the work
# before other processes finished `get_all` method
for node_rank in range(size):
store.get(f"{prefix}{node_rank}.FIN")
return data_arr
def synchronize(
store,
data: bytes,
rank: int,
world_size: int,
key_prefix: str,
barrier_timeout: float = 300,
) -> List[bytes]:
"""
Synchronizes ``world_size`` agents between each other using the underlying c10d store.
The ``data`` will be available on each of the agents.
Note: The data on the path is not deleted, as a result there can be stale data if
you use the same key_prefix twice.
"""
store.set_timeout(timedelta(seconds=barrier_timeout))
store.set(f"{key_prefix}{rank}", data)
agent_data = get_all(store, rank, key_prefix, world_size)
return agent_data
def barrier(
store, rank: int, world_size: int, key_prefix: str, barrier_timeout: float = 300
) -> None:
"""
A global lock between agents.
Note: Since the data is not removed from the store, the barrier can be used
once per unique ``key_prefix``.
"""
data = f"{rank}".encode(encoding="UTF-8")
synchronize(store, data, rank, world_size, key_prefix, barrier_timeout)
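# Illustrative usage sketch: a single-process ``synchronize``/``barrier`` round
# trip over a local TCPStore. The port number and key prefixes are arbitrary
# choices made only for this example.
if __name__ == "__main__":
    import torch.distributed as dist

    store = dist.TCPStore("localhost", 29531, world_size=1, is_master=True)
    payload = b"hello-from-rank-0"
    replies = synchronize(store, payload, rank=0, world_size=1, key_prefix="demo/")
    assert replies == [payload]
    barrier(store, rank=0, world_size=1, key_prefix="demo_barrier/")
    print("synchronize/barrier completed")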
| pytorch-master | torch/distributed/elastic/utils/store.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import logging
import os
import warnings
from typing import Optional
from torch.distributed.elastic.utils.log_level import get_log_level
def get_logger(name: Optional[str] = None):
"""
Util function to set up a simple logger that writes
into stderr. The log level is fetched from the LOGLEVEL
environment variable, defaulting to WARNING. The function will use the
module name of the caller if no name is provided.
Args:
name: Name of the logger. If no name provided, the name will
be derived from the call stack.
"""
# Derive the name of the caller, if none provided
# Use depth=2 since this function takes up one level in the call stack
return _setup_logger(name or _derive_module_name(depth=2))
def _setup_logger(name: Optional[str] = None):
log = logging.getLogger(name)
log.setLevel(os.environ.get("LOGLEVEL", get_log_level()))
return log
def _derive_module_name(depth: int = 1) -> Optional[str]:
"""
Derives the name of the caller module from the stack frames.
Args:
depth: The position of the frame in the stack.
"""
try:
stack = inspect.stack()
assert depth < len(stack)
# FrameInfo is just a named tuple: (frame, filename, lineno, function, code_context, index)
frame_info = stack[depth]
module = inspect.getmodule(frame_info[0])
if module:
module_name = module.__name__
else:
# inspect.getmodule(frame_info[0]) does NOT work (returns None) in
# binaries built with @mode/opt
# return the filename (minus the .py extension) as modulename
filename = frame_info[1]
module_name = os.path.splitext(os.path.basename(filename))[0]
return module_name
except Exception as e:
warnings.warn(
f"Error deriving logger module name, using <None>. Exception: {e}",
RuntimeWarning,
)
return None
| pytorch-master | torch/distributed/elastic/utils/logging.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .api import get_env_variable_or_raise, get_socket_with_port, macros # noqa: F401
| pytorch-master | torch/distributed/elastic/utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
def get_log_level() -> str:
"""
Return default log level for pytorch.
"""
return "WARNING"
| pytorch-master | torch/distributed/elastic/utils/log_level.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import socket
from string import Template
from typing import List, Any
def get_env_variable_or_raise(env_name: str) -> str:
r"""
Tries to retrieve environment variable. Raises ``ValueError``
if no environment variable found.
Args:
env_name (str): Name of the env variable
"""
value = os.environ.get(env_name, None)
if value is None:
msg = f"Environment variable {env_name} expected, but not set"
raise ValueError(msg)
return value
def get_socket_with_port() -> socket.socket:
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
s = socket.socket(family, type, proto)
try:
s.bind(("localhost", 0))
s.listen(0)
return s
except OSError as e:
s.close()
raise RuntimeError("Failed to create a socket")
class macros:
"""
Defines simple macros for caffe2.distributed.launch cmd args substitution
"""
local_rank = "${local_rank}"
@staticmethod
def substitute(args: List[Any], local_rank: str) -> List[str]:
args_sub = []
for arg in args:
if isinstance(arg, str):
sub = Template(arg).safe_substitute(local_rank=local_rank)
args_sub.append(sub)
else:
args_sub.append(arg)
return args_sub
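# Illustrative usage sketch: expanding the ``${local_rank}`` macro in a
# command-line argument list; the flag names used here are made up.
if __name__ == "__main__":
    cmd_args = ["--local_rank", macros.local_rank, "--epochs", 3]
    print(macros.substitute(cmd_args, local_rank="2"))
    # -> ['--local_rank', '2', '--epochs', 3]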
| pytorch-master | torch/distributed/elastic/utils/api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import socket
from contextlib import closing
import torch.distributed as dist
from torch.distributed.elastic.utils.logging import get_logger
log = get_logger()
_ADDRESS_IN_USE = "Address already in use"
_SOCKET_TIMEOUT = "Socket Timeout"
_MEMBER_CHECKIN = "_tcp_store/num_members"
_LAST_MEMBER_CHECKIN = "_tcp_store/last_member"
def create_c10d_store(
is_server: bool,
server_addr: str,
server_port: int = -1,
world_size: int = 1,
timeout: float = (60 * 10), # 10 min
wait_for_workers: bool = True,
retries=3,
):
if server_port == -1 and world_size > 1:
raise ValueError(
f"server_port must be specified when world_size > 1, got server_port={server_port}, world_size={world_size}"
)
if server_port != -1:
log.info(f"sever_port: {server_port}, specified, ignoring retries")
# only retry when server_port is NOT static
attempt = retries if server_port == -1 else 1
while True:
if server_port != -1:
port = server_port
else:
port = get_free_port()
log.info(
f"Creating c10d store on {server_addr}:{port}\n"
f" world_size : {world_size}\n"
f" is_server : {is_server}\n"
f" timeout(sec): {timeout}\n"
)
try:
store = dist.TCPStore(
host_name=server_addr,
port=port,
world_size=world_size,
is_master=is_server,
timeout=datetime.timedelta(seconds=timeout),
wait_for_workers=wait_for_workers,
)
# skips full rank check when we don't have to wait for all workers
if wait_for_workers:
_check_full_rank(store, world_size)
log.info("Successfully created c10d store")
return store
except RuntimeError as e:
# this is brittle, but the underlying exception type is not properly pybinded
# so we parse the error msg for now, interestingly this is how torch itself
# detects timeouts and port conflicts in their own unittests
# see - caffe2/torch/testing/_internal/common_utils.py
# TODO properly map the exceptions in pybind (c10d/init.cpp)
if str(e) == _ADDRESS_IN_USE: # this will only happen on the server
if attempt < retries:
log.warning(
f"port: {port} already in use, attempt: [{attempt}/{retries}]"
)
attempt += 1
else:
raise RuntimeError(
f"on {server_addr}, port: {port} already in use"
) from e
else:
raise
def _check_full_rank(store, world_size):
idx = store.add(_MEMBER_CHECKIN, 1)
if idx == world_size:
store.set(_LAST_MEMBER_CHECKIN, "<val_ignored>")
try:
store.get(_LAST_MEMBER_CHECKIN)
except RuntimeError as e:
if str(e) == _SOCKET_TIMEOUT:
raise TimeoutError(
f"timed out waiting for all {world_size} members to join"
) from e
else:
raise
def get_free_port():
sock = get_socket_with_port()
with closing(sock):
return sock.getsockname()[1]
def get_socket_with_port() -> socket.socket:
"""
Returns a free port on localhost that is "reserved" by binding a temporary
socket on it. Close the socket before passing the port to the entity
that requires it. Usage example
::
sock = get_socket_with_port()
with closing(sock):
port = sock.getsockname()[1]
sock.close()
# there is still a race-condition that some other process
# may grab this port before func() runs
func(port)
"""
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
s = socket.socket(family, type, proto)
try:
s.bind(("localhost", 0))
s.listen(0)
return s
except OSError as e:
s.close()
log.info("Socket creation attempt failed.", exc_info=e)
raise RuntimeError("Failed to create a socket")
| pytorch-master | torch/distributed/elastic/utils/distributed.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch.utils.data.distributed import DistributedSampler
class ElasticDistributedSampler(DistributedSampler):
"""
Sampler that restricts data loading to a subset of
the dataset for elastic training.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Args:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
start_index (optional): Which index of the dataset to start sampling from
"""
def __init__(self, dataset, num_replicas=None, rank=None, start_index=0):
super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank)
if start_index >= len(dataset):
raise ValueError(
"Start index {} should be less than dataset size {}".format(
start_index, len(dataset)
)
)
self.start_index = start_index
self.num_samples = int(
math.ceil(float(len(self.dataset) - self.start_index) / self.num_replicas) # type: ignore[arg-type]
)
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = (
torch.randperm(len(self.dataset) - self.start_index, generator=g) # type: ignore[arg-type]
.add(self.start_index)
.tolist()
)
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
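# Illustrative usage sketch: sampling the tail of a toy dataset across two
# hypothetical replicas, starting at index 4. Passing explicit ``num_replicas``
# and ``rank`` avoids needing an initialized process group for the example.
if __name__ == "__main__":
    dataset = list(range(10))
    sampler = ElasticDistributedSampler(dataset, num_replicas=2, rank=0, start_index=4)
    print(len(sampler), sorted(sampler))  # 3 shuffled indices drawn from [4, 9]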
| pytorch-master | torch/distributed/elastic/utils/data/elastic_distributed_sampler.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
class CyclingIterator:
"""
An iterator decorator that cycles through the
underlying iterator "n" times. Useful to "unroll"
the dataset across multiple training epochs.
The generator function is called as ``generator_fn(epoch)``
to obtain the underlying iterator, where ``epoch`` is a
number less than or equal to ``n`` representing the ``k``th cycle
For example if ``generator_fn`` always returns ``[1,2,3]``
then ``CyclingIterator(n=2, generator_fn)`` will iterate through
``[1,2,3,1,2,3]``
"""
def __init__(self, n: int, generator_fn, start_epoch=0):
self._n = n
self._epoch = start_epoch
self._generator_fn = generator_fn
self._iter = generator_fn(self._epoch)
def __iter__(self):
return self
def __next__(self):
try:
return next(self._iter)
except StopIteration as eod: # eod == end of data
if self._epoch < self._n - 1:
self._epoch += 1
self._iter = self._generator_fn(self._epoch)
return self.__next__()
else:
raise eod
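# Illustrative usage sketch: unrolling a small in-memory dataset across two
# epochs with CyclingIterator.
if __name__ == "__main__":
    it = CyclingIterator(n=2, generator_fn=lambda epoch: iter([1, 2, 3]))
    print(list(it))  # -> [1, 2, 3, 1, 2, 3]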
| pytorch-master | torch/distributed/elastic/utils/data/cycling_iterator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .cycling_iterator import CyclingIterator # noqa: F401
from .elastic_distributed_sampler import ElasticDistributedSampler # noqa: F401
| pytorch-master | torch/distributed/elastic/utils/data/__init__.py |
| pytorch-master | torch/distributed/elastic/agent/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
The elastic agent is the control plane of torchelastic. It is a process
that launches and manages underlying worker processes. The agent is
responsible for:
1. Working with distributed torch: the workers are started with all the
necessary information to successfully and trivially call
``torch.distributed.init_process_group()``.
2. Fault tolerance: monitors workers and upon detecting worker failures
or unhealthiness, tears down all workers and restarts everyone.
3. Elasticity: Reacts to membership changes and restarts workers with the new
members.
The simplest agents are deployed per node and work with local processes.
A more advanced agent can launch and manage workers remotely. Agents can
be completely decentralized, making decisions based on the workers they manage,
or they can be coordinated, communicating with other agents (that manage workers
in the same job) to make a collective decision.
"""
from .api import ( # noqa: F401
ElasticAgent,
SimpleElasticAgent,
Worker,
WorkerGroup,
RunResult,
WorkerSpec,
WorkerState,
)
| pytorch-master | torch/distributed/elastic/agent/server/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import functools
import json
import os
import signal
import socket
import time
import traceback
import warnings
from contextlib import closing
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch.distributed.elastic.rendezvous as rdzv
import torch.distributed.elastic.utils.store as store_util
from torch.distributed import Store
from torch.distributed.elastic.events import Event, EventSource, record
from torch.distributed.elastic.metrics import prof, put_metric
from torch.distributed.elastic.multiprocessing import (
ProcessFailure,
SignalException,
Std,
)
from torch.distributed.elastic.utils.logging import get_logger
__all__ = ['WorkerSpec', 'Worker', 'WorkerState', 'WorkerGroup', 'RunResult', 'ElasticAgent', 'SimpleElasticAgent']
_TERMINAL_STATE_SYNC_ID = "torchelastic/agent/terminal_state"
DEFAULT_ROLE = "default"
log = get_logger()
@dataclass
class WorkerSpec:
"""
Contains blueprint information about a particular type of worker.
For a given role, there must only exist a single worker spec.
Worker spec is expected to be homogenous across all nodes (machine),
that is each node runs the same number of workers for a particular spec.
Args:
role: user-defined role for the workers with this spec
local_world_size: number of local workers to run
fn: (deprecated use entrypoint instead)
entrypoint: worker function or command
args: arguments to pass to ``entrypoint``
rdzv_handler: handles rdzv for this set of workers
max_restarts: number of max retries for the workers
monitor_interval: monitor status of workers every ``n`` seconds
master_port: fixed port to run the c10d store on rank 0
if not specified then will choose a random free port
master_addr: fixed master_addr to run the c10d store on rank 0
if not specified then will choose the hostname of the agent on rank 0
redirects: redirect std streams to a file,
selectively redirect for a particular
local rank by passing a map
tee: tees the specified std stream(s) to console + file,
selectively tee for a particular local rank by passing a map,
takes precedence over ``redirects`` settings.
"""
role: str
local_world_size: int
rdzv_handler: rdzv.RendezvousHandler
fn: Optional[Callable] = None
# TODO @kiuk - make entrypoint a required field
entrypoint: Union[Callable, str, None] = None
args: Tuple = ()
max_restarts: int = 3
monitor_interval: float = 30.0
master_port: Optional[int] = None
master_addr: Optional[str] = None
redirects: Union[Std, Dict[int, Std]] = Std.NONE
tee: Union[Std, Dict[int, Std]] = Std.NONE
def __post_init__(self):
assert self.local_world_size > 0
assert self.monitor_interval > 0
if self.fn:
warnings.warn(
"WorkerSpec.fn will be deprecated,"
" please use WorkerSpec.entrypoint instead",
category=DeprecationWarning,
)
self.entrypoint = self.fn
assert self.entrypoint
def get_entrypoint_name(self):
"""
If the entrypoint is a function (e.g. ``Callable``) returns its ``__qualname__``,
else if the entrypoint is a binary (e.g. ``str``), returns the binary name.
"""
if isinstance(self.entrypoint, str):
return os.path.basename(self.entrypoint)
else:
assert self.entrypoint is not None
return self.entrypoint.__qualname__
class Worker:
"""
Represents a worker instance. Contrast this with ``WorkerSpec`` that
represents the specifications of a worker. A ``Worker`` is created from
a ``WorkerSpec``. A ``Worker`` is to a ``WorkerSpec`` as an object is to
a class.
The ``id`` of the worker is interpreted
by the specific implementation of ``ElasticAgent``. For a local
agent, it could be the ``pid (int)`` of the worker, for a remote
agent it could be encoded as ``host:port (string)``.
Args:
id (Any): uniquely identifies a worker (interpreted by the agent)
local_rank (int): local rank of the worker
global_rank (int): global rank of the worker
role_rank (int): rank of the worker across all workers that have the same role
world_size (int): number of workers (globally)
role_world_size (int): number of workers that have the same role
"""
__slots__ = [
"id",
"local_rank",
"global_rank",
"role_rank",
"world_size",
"role_world_size",
]
def __init__(
self,
local_rank: int,
global_rank: int = -1,
role_rank: int = -1,
world_size: int = -1,
role_world_size: int = -1,
):
# unique identifier for this worker
self.id: Any = None
# rank of the worker among workers with the same role being monitored
# by the same ``agent`` instance.
self.local_rank: int = local_rank
# rank of the worker among all the workers across all roles
# across all ``agent`` instances.
# Global rank is not stable between re-rendezvous.
self.global_rank: int = global_rank
# rank of the worker among all the workers with the same role
# across all ``agent`` instances.
# Role rank is not stable between re-rendezvous.
self.role_rank: int = role_rank
# total number of workers (globally). Due to elasticity
# the world size may change between re-rendezvous.
self.world_size: int = world_size
# total number of workers that share the same role. Due to elasticity
# the role world size may change between re-rendezvous.
self.role_world_size: int = role_world_size
def __str__(self):
return (
f"local_rank={self.local_rank},global_rank={self.global_rank}"
f",role_rank={self.role_rank},world_size={self.world_size}"
f",role_world_size={self.role_world_size}"
)
def __repr__(self):
return str(self)
class WorkerState(str, Enum):
"""
    State of the ``WorkerGroup``. Workers in a worker group change state as a unit.
    If a single worker in a worker group fails, the entire set is considered
    failed::
UNKNOWN - agent lost track of worker group state, unrecoverable
INIT - worker group object created not yet started
HEALTHY - workers running and healthy
UNHEALTHY - workers running and unhealthy
      STOPPED - workers stopped (interrupted) by the agent
SUCCEEDED - workers finished running (exit 0)
FAILED - workers failed to successfully finish (exit !0)
A worker group starts from an initial ``INIT`` state,
then progresses to ``HEALTHY`` or ``UNHEALTHY`` states,
and finally reaches a terminal ``SUCCEEDED`` or ``FAILED`` state.
Worker groups can be interrupted and temporarily put into ``STOPPED`` state
by the agent. Workers in ``STOPPED`` state are scheduled to be restarted
in the near future by the agent. Some examples of workers being put into
``STOPPED`` state are:
1. Worker group failure|unhealthy observed
2. Membership change detected
    When an action (start, stop, rdzv, retry, etc.) on the worker group fails
    and results in the action being partially applied to the worker group,
    the state will be ``UNKNOWN``. Typically this happens on uncaught/unhandled
    exceptions during state-change events on the agent. The agent is not
    expected to recover worker groups in the ``UNKNOWN`` state and is better off
    self-terminating and allowing the job manager to retry the node.
"""
UNKNOWN = "UNKNOWN"
INIT = "INIT"
HEALTHY = "HEALTHY"
UNHEALTHY = "UNHEALTHY"
STOPPED = "STOPPED"
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
@staticmethod
def is_running(state: "WorkerState") -> bool:
"""
Returns:
            True if the worker state represents workers still running
            (i.e. the worker processes exist but are not necessarily healthy).
"""
return state in {WorkerState.HEALTHY, WorkerState.UNHEALTHY}
class WorkerGroup:
"""
Represents the set of ``Worker`` instances for the given ``WorkerSpec``
managed by ``ElasticAgent``. Whether the worker group contains cross
instance workers or not depends on the implementation of the agent.
"""
__slots__ = ["spec", "workers", "store", "group_rank", "group_world_size", "state"]
def __init__(self, spec: WorkerSpec):
self.spec = spec
self.workers = [Worker(local_rank=i) for i in range(self.spec.local_world_size)]
# assigned after rdzv
self.store = None
self.group_rank = None
self.group_world_size = None
self.state = WorkerState.INIT
class _RoleInstanceInfo:
"""
    The class is used by the agent to exchange information with other agents.
    The information is used to determine the rank of the workers that the agent
    manages in heterogeneous environments, where different agents can have
    different numbers of workers.
"""
__slots__ = ["role", "rank", "local_world_size"]
def __init__(self, role: str, rank: int, local_world_size: int):
r"""
Args:
role (str): user-defined role for the workers with this spec
rank (int): the rank of the agent
local_world_size (int): number of local workers to run
"""
self.role = role
self.rank = rank
self.local_world_size = local_world_size
def serialize(self) -> bytes:
dict_data = {
"role": self.role,
"rank": self.rank,
"local_world_size": self.local_world_size,
}
return json.dumps(dict_data).encode(encoding="UTF-8")
@staticmethod
def deserialize(data: bytes):
dict_data = json.loads(data.decode(encoding="UTF-8"))
return _RoleInstanceInfo(
dict_data["role"], dict_data["rank"], dict_data["local_world_size"]
)
@staticmethod
def compare(obj1, obj2) -> int:
if obj1.role == obj2.role:
return obj1.rank - obj2.rank
elif obj1.role > obj2.role:
return 1
else:
return -1
@staticmethod
def find_role_boundaries(roles_infos: List, role: str) -> Tuple[int, int]:
start_idx, end_idx = -1, -1
for idx, role_info in enumerate(roles_infos):
if role_info.role == role:
if start_idx == -1:
start_idx = idx
end_idx = idx
return (start_idx, end_idx)
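# Illustrative sketch (not part of the original module): demonstrates how
# ``_RoleInstanceInfo`` objects round-trip through their wire format and how
# role boundaries are located after the two-level (role, rank) sort. The role
# names and sizes below are made up for illustration.
def _example_role_boundaries() -> Tuple[int, int]:
    infos = [
        _RoleInstanceInfo("ps", rank=0, local_world_size=1),
        _RoleInstanceInfo("trainer", rank=1, local_world_size=4),
        _RoleInstanceInfo("trainer", rank=2, local_world_size=4),
    ]
    # Round-trip through the encoding used with the rendezvous store.
    infos = [_RoleInstanceInfo.deserialize(info.serialize()) for info in infos]
    infos = sorted(infos, key=functools.cmp_to_key(_RoleInstanceInfo.compare))
    # Returns (1, 2): the first and last indices of the "trainer" entries.
    return _RoleInstanceInfo.find_role_boundaries(infos, "trainer")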
@dataclass
class RunResult:
"""
Results returned by the worker executions. Run results follow an "all-or-nothing" policy
where the run is successful if and only if ALL local workers managed by this agent
complete successfully.
If the result is successful (e.g. ``is_failed() = False``) then the ``return_values``
field contains the outputs (return values) of the workers managed by THIS agent mapped
by their GLOBAL ranks. That is ``result.return_values[0]`` is the return value of
global rank 0.
    .. note:: ``return_values`` are only meaningful when the worker entrypoint
        is a function. Workers specified as a binary entrypoint do not canonically
        have a return value, so the ``return_values`` field is meaningless and
        may be empty.
If ``is_failed()`` returns ``True`` then the ``failures`` field contains the
failure information, again, mapped by the GLOBAL rank of the worker that failed.
    The keys in ``return_values`` and ``failures`` are mutually exclusive, that is,
    a worker's final state can only be one of: succeeded, failed. Workers intentionally
    terminated by the agent according to the agent's restart policy are not represented
    in either ``return_values`` or ``failures``.
"""
state: WorkerState
return_values: Dict[int, Any] = field(default_factory=dict)
failures: Dict[int, ProcessFailure] = field(default_factory=dict)
def is_failed(self) -> bool:
return self.state == WorkerState.FAILED
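# Illustrative sketch (not part of the original module): shows how a caller
# might inspect a ``RunResult`` returned by ``ElasticAgent.run()``. Both
# ``return_values`` and ``failures`` are keyed by GLOBAL rank.
def _example_inspect_run_result(result: RunResult) -> None:
    if result.is_failed():
        for global_rank, failure in result.failures.items():
            print(f"global rank {global_rank} failed: {failure}")
    else:
        for global_rank, ret_val in result.return_values.items():
            print(f"global rank {global_rank} returned: {ret_val!r}")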
def _get_socket_with_port() -> socket.socket:
"""
    Returns a socket bound to a free port on localhost; the port is "reserved"
    for as long as the temporary socket stays bound. Close the socket before
    passing the port to the entity that requires it. Usage example
::
sock = _get_socket_with_port()
with closing(sock):
port = sock.getsockname()[1]
sock.close()
# there is still a race-condition that some other process
# may grab this port before func() runs
func(port)
"""
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
s = socket.socket(family, type, proto)
try:
s.bind(("localhost", 0))
s.listen(0)
return s
except OSError as e:
s.close()
log.info("Socket creation attempt failed.", exc_info=e)
raise RuntimeError("Failed to create a socket")
def _get_fq_hostname() -> str:
return socket.getfqdn(socket.gethostname())
class ElasticAgent(abc.ABC):
"""
Agent process responsible for managing one or more worker processes.
The worker processes are assumed to be regular distributed PyTorch scripts.
When the worker process is created by the agent, the agent provides the
necessary information for the worker processes to properly initialize
a torch process group.
The exact deployment topology and ratio of agent-to-worker is dependent
on the specific implementation of the agent and the user's job placement
preferences. For instance, to run a distributed training job on GPU with
8 trainers (one per GPU) one can:
1. Use 8 x single GPU instances, place an agent per instance, managing
1 worker per agent.
2. Use 4 x double GPU instances, place an agent per instance, managing
2 workers per agent.
3. Use 2 x quad GPU instances, place an agent per instance, managing
4 workers per agent.
4. Use 1 x 8 GPU instance, place an agent per instance, managing
8 workers per agent.
Usage
::
group_result = agent.run()
if group_result.is_failed():
# workers failed
failure = group_result.failures[0]
log.exception(f"worker 0 failed with exit code : {failure.exit_code}")
else:
return group_result.return_values[0] # return rank 0's results
"""
@abc.abstractmethod
def run(self, role: str = DEFAULT_ROLE) -> RunResult:
"""
Runs the agent, retrying the worker group on failures up to
``max_restarts``.
Returns:
The result of the execution, containing the return values or
failure details for each worker mapped by the worker's global rank.
Raises:
Exception - any other failures NOT related to worker process
"""
raise NotImplementedError()
@abc.abstractmethod
def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup:
"""
Returns:
The ``WorkerGroup`` for the given ``role``.
Note that the worker group is a mutable object and hence in a
multi-threaded/process environment it may change state.
Implementors are encouraged (but not required) to return
a defensive read-only copy.
"""
raise NotImplementedError()
class SimpleElasticAgent(ElasticAgent):
"""
An ``ElasticAgent`` that manages workers (``WorkerGroup``)
for a single ``WorkerSpec`` (e.g. one particular type of worker role).
"""
def __init__(self, spec: WorkerSpec, exit_barrier_timeout: float = 300):
self._worker_group = WorkerGroup(spec)
self._remaining_restarts = self._worker_group.spec.max_restarts
self._store = None
self._exit_barrier_timeout = exit_barrier_timeout
self._total_execution_time = 0
def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup:
return self._worker_group
@abc.abstractmethod
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
r"""
        Starts ``worker_group.spec.local_world_size`` workers
        according to the worker spec for the worker group.
Returns a map of ``local_rank`` to worker ``id``.
"""
raise NotImplementedError()
@abc.abstractmethod
def _stop_workers(self, worker_group: WorkerGroup) -> None:
r"""
Stops all workers in the given worker group. Implementors
must deal with workers in all states defined by ``WorkerState``.
That is, it must gracefully handle stopping non-existent workers,
unhealthy (stuck) workers, etc.
"""
raise NotImplementedError()
@abc.abstractmethod
def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
r"""
Checks on the workers for the ``worker_group`` and returns
the new state of the worker group.
"""
raise NotImplementedError()
@abc.abstractmethod
def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
"""
Cleans up any resources that were allocated during the agent's work.
Args:
death_sig: Signal to send to the child process, SIGTERM is default
"""
raise NotImplementedError()
@staticmethod
def _set_master_addr_port(
store: Store, master_addr: Optional[str], master_port: Optional[int]
):
if master_port is None:
sock = _get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
if master_addr is None:
master_addr = _get_fq_hostname()
store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
@staticmethod
def _get_master_addr_port(store: Store) -> Tuple[str, int]:
master_addr = store.get("MASTER_ADDR").decode(encoding="UTF-8")
master_port = int(store.get("MASTER_PORT").decode(encoding="UTF-8"))
return (master_addr, master_port)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _rendezvous(self, worker_group: WorkerGroup) -> None:
r"""
Runs rendezvous for the workers specified by worker spec.
Assigns workers a new global rank and world size.
Updates the rendezvous store for the worker group.
"""
spec = worker_group.spec
store, group_rank, group_world_size = spec.rdzv_handler.next_rendezvous()
self._store = store
workers = self._assign_worker_ranks(store, group_rank, group_world_size, spec)
worker_group.workers = workers
worker_group.store = store
worker_group.group_rank = group_rank
worker_group.group_world_size = group_world_size
if group_rank == 0:
self._set_master_addr_port(store, spec.master_addr, spec.master_port)
master_addr, master_port = self._get_master_addr_port(store)
restart_count = spec.max_restarts - self._remaining_restarts
log.info(
f"[{spec.role}] Rendezvous complete for workers. Result:\n"
f" restart_count={restart_count}\n"
f" master_addr={master_addr}\n"
f" master_port={master_port}\n"
f" group_rank={group_rank}\n"
f" group_world_size={group_world_size}\n"
f" local_ranks={[worker.local_rank for worker in workers]}\n"
f" role_ranks={[worker.role_rank for worker in workers]}\n"
f" global_ranks={[worker.global_rank for worker in workers]}\n"
f" role_world_sizes={[worker.role_world_size for worker in workers]}\n"
f" global_world_sizes={[worker.world_size for worker in workers]}\n"
)
def _get_ranks(
self,
role_infos: List[_RoleInstanceInfo],
role_idx: int,
start_idx: int = 0,
end_idx: int = -1,
) -> Tuple[int, List[int]]:
if end_idx == -1:
end_idx = len(role_infos)
prefix_sum = 0
total_sum = 0
for idx in range(start_idx, end_idx):
if role_idx > idx:
prefix_sum += role_infos[idx].local_world_size
total_sum += role_infos[idx].local_world_size
return (
total_sum,
list(range(prefix_sum, prefix_sum + role_infos[role_idx].local_world_size)),
)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _assign_worker_ranks(
self, store, group_rank: int, group_world_size: int, spec: WorkerSpec
) -> List[Worker]:
"""
Determines proper ranks for worker processes. The rank assignment
is done according to the following algorithm:
1. Each agent writes its configuration(group_rank, group_world_size
, num_workers) to the common store.
        2. Each agent retrieves the configuration of all agents
           and performs a two-level sort using role and rank.
        3. Determine the global rank: the global rank of the workers for the current
           agent is the offset into the infos array up to the agent's group_rank.
           The offset is computed as the sum of local_world_size of all agents that
           have a rank less than the group_rank. The workers would have the ranks:
           [offset, offset+local_world_size)
        4. Determine the role rank: the role rank is determined using the algorithm
           in point 3, with the exception that the offset starts from the first
           agent that has the same role as the current one and has the minimum group rank.
"""
role_infos = self._share_and_gather(store, group_rank, group_world_size, spec)
my_role_info = role_infos[group_rank]
worker_world_size, worker_global_ranks = self._get_ranks(role_infos, group_rank)
role_infos = sorted(
role_infos, key=functools.cmp_to_key(_RoleInstanceInfo.compare)
)
role_start_idx, role_end_idx = _RoleInstanceInfo.find_role_boundaries(
role_infos, my_role_info.role
)
role_pos = next(
idx
for idx, role_info in enumerate(role_infos)
if _RoleInstanceInfo.compare(role_info, my_role_info) == 0
)
role_world_size, role_ranks = self._get_ranks(
role_infos, role_pos, role_start_idx, role_end_idx + 1
)
workers = []
for ind in range(spec.local_world_size):
worker = Worker(
local_rank=ind,
global_rank=worker_global_ranks[ind],
role_rank=role_ranks[ind],
world_size=worker_world_size,
role_world_size=role_world_size,
)
workers.append(worker)
return workers
def _share_and_gather(
self, store, group_rank: int, group_world_size: int, spec: WorkerSpec
) -> List:
agent_role_info = _RoleInstanceInfo(
spec.role, group_rank, spec.local_world_size
)
key_prefix = "torchelastic/role_info"
agent_config_enc = agent_role_info.serialize()
role_infos_bytes = store_util.synchronize(
store, agent_config_enc, group_rank, group_world_size, key_prefix
)
role_infos = [
_RoleInstanceInfo.deserialize(role_info_bytes)
for role_info_bytes in role_infos_bytes
]
return role_infos
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _initialize_workers(self, worker_group: WorkerGroup) -> None:
r"""
Starts a fresh set of workers for the worker_group.
        Essentially, a rendezvous followed by a ``_start_workers()``.
        The caller should first call ``_stop_workers()`` to stop running workers
        prior to calling this method.
        Optimistically sets the state of the worker group that
        just started as ``HEALTHY`` and delegates the actual monitoring
        of state to the ``_monitor_workers()`` method.
"""
role = worker_group.spec.role
log.info(f"[{role}] Rendezvous'ing worker group")
# TODO after stopping workers, wait at least monitor_interval*2 for
# workers on different nodes to fail on a collective op before waiting
# on the rdzv barrier, this way we ensure that nodes enter rdzv
# at around the same time and reduce false positive rdzv timeout errors
self._rendezvous(worker_group)
log.info(f"[{role}] Starting worker group")
worker_ids = self._start_workers(worker_group)
for local_rank, w_id in worker_ids.items():
worker = worker_group.workers[local_rank]
worker.id = w_id
worker_group.state = WorkerState.HEALTHY
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _restart_workers(self, worker_group: WorkerGroup) -> None:
"""
Restarts (stops, rendezvous, starts) all local workers in the group.
"""
role = worker_group.spec.role
log.info(f"[{role}] Stopping worker group")
self._stop_workers(worker_group)
worker_group.state = WorkerState.STOPPED
self._initialize_workers(worker_group)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def run(self, role: str = DEFAULT_ROLE) -> RunResult:
start_time = time.monotonic()
shutdown_called: bool = False
try:
result = self._invoke_run(role)
self._total_execution_time = int(time.monotonic() - start_time)
self._record_metrics(result)
self._record_worker_events(result)
return result
except SignalException as e:
log.warning(f"Received {e.sigval} death signal, shutting down workers")
self._shutdown(e.sigval)
shutdown_called = True
raise
finally:
if not shutdown_called:
self._shutdown()
# record the execution time in case there were any exceptions during run.
self._total_execution_time = int(time.monotonic() - start_time)
def get_event_failed(self) -> Event:
return self._construct_event(
state="FAILED",
source=EventSource.AGENT,
raw_error=traceback.format_exc(),
)
def get_event_succeeded(self) -> Event:
return self._construct_event(
state="SUCCEEDED",
source=EventSource.AGENT,
)
def _record_worker_events(self, result: RunResult) -> None:
for worker in self._worker_group.workers:
failure = result.failures.get(worker.global_rank)
state: str = self._get_worker_state(worker, result)
raw_error = json.dumps(failure.error_file_data) if failure else None
record(self._construct_event(state, EventSource.WORKER, worker, raw_error))
def _get_worker_state(self, worker: Worker, result: RunResult) -> str:
failure = result.failures.get(worker.global_rank)
if result.state in {WorkerState.UNHEALTHY, WorkerState.FAILED} and not failure:
# The worker got terminated by the torchelastic agent via SIGTERM signal
return "TERMINATED"
elif failure or worker.global_rank in result.return_values:
return result.state.value
else:
raise ValueError(f"Unknow worker: {worker.global_rank}")
def _construct_event(
self,
state: str,
source: EventSource,
worker: Optional[Worker] = None,
raw_error: Optional[str] = None,
) -> Event:
wg = self._worker_group
spec = wg.spec
md = {
"group_world_size": wg.group_world_size,
"entry_point": spec.get_entrypoint_name(),
}
if worker:
md["local_rank"] = (worker.local_rank,)
md["role_rank"] = (worker.role_rank,)
md["role_world_size"] = (worker.role_world_size,)
global_rank = worker.global_rank
worker_id = str(worker.id)
else:
global_rank = None
worker_id = None
md_str = json.dumps(md)
metadata = {
"run_id": spec.rdzv_handler.get_run_id(),
"global_rank": global_rank,
"group_rank": wg.group_rank,
"worker_id": worker_id,
"role": spec.role,
"hostname": _get_fq_hostname(),
"state": state,
"total_run_time": self._total_execution_time,
"rdzv_backend": spec.rdzv_handler.get_backend(),
"raw_error": raw_error,
"metadata": md_str,
"agent_restarts": spec.max_restarts - self._remaining_restarts,
}
return Event(
f"torchelastic.worker.status.{state}", source=source, metadata=metadata
)
def _record_metrics(self, group_results: RunResult):
is_failed = group_results.is_failed()
self._record_flakiness_metric(is_failed)
spec = self._worker_group.spec
restarts_happened = self._remaining_restarts != spec.max_restarts
put_metric(f"workers.{spec.role}.run_total", 1)
self._record_metric_with_condition(
"run_success_with_retries", not is_failed and restarts_happened
)
self._record_metric_with_condition(
"run_success_no_retries", not is_failed and not restarts_happened
)
self._record_metric_with_condition(
"run_failed_with_retries", is_failed and restarts_happened
)
self._record_metric_with_condition(
"run_failed_no_retries", is_failed and not restarts_happened
)
def _record_metric_with_condition(self, metric_name, condition):
spec = self._worker_group.spec
if condition:
put_metric(f"workers.{spec.role}.{metric_name}", 1)
else:
put_metric(f"workers.{spec.role}.{metric_name}", 0)
def _record_flakiness_metric(self, is_failed: bool = False):
if is_failed:
flakiness = 100.0
else:
spec = self._worker_group.spec
flakiness = 100.0 - 100.0 * (self._remaining_restarts + 1) / (
spec.max_restarts + 1
)
spec = self._worker_group.spec
put_metric(f"workers.{spec.role}.flakiness", int(flakiness))
def _invoke_run(self, role: str = DEFAULT_ROLE) -> RunResult:
# NOTE: currently only works for a single role
spec = self._worker_group.spec
role = spec.role
log.info(
f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}"
)
self._initialize_workers(self._worker_group)
monitor_interval = spec.monitor_interval
rdzv_handler = spec.rdzv_handler
while True:
assert self._worker_group.state != WorkerState.INIT
time.sleep(monitor_interval)
run_result = self._monitor_workers(self._worker_group)
state = run_result.state
self._worker_group.state = state
put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
put_metric(f"workers.{role}.{state.name.lower()}", 1)
if state == WorkerState.SUCCEEDED:
log.info(
f"[{role}] worker group successfully finished."
f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish."
)
self._exit_barrier()
return run_result
elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED}:
if self._remaining_restarts > 0:
log.info(
f"[{role}] Worker group {state.name}. "
f"{self._remaining_restarts}/{spec.max_restarts} attempts left;"
f" will restart worker group"
)
self._remaining_restarts -= 1
self._restart_workers(self._worker_group)
else:
self._stop_workers(self._worker_group)
self._worker_group.state = WorkerState.FAILED
self._exit_barrier()
return run_result
elif state == WorkerState.HEALTHY:
# membership changes do not count as retries
num_nodes_waiting = rdzv_handler.num_nodes_waiting()
group_rank = self._worker_group.group_rank
if num_nodes_waiting > 0:
log.info(
f"[{role}] Detected {num_nodes_waiting} "
f"new nodes from group_rank={group_rank}; "
f"will restart worker group"
)
self._restart_workers(self._worker_group)
else:
raise Exception(f"[{role}] Worker group in {state.name} state")
def _exit_barrier(self):
"""
        Waits for ``exit_barrier_timeout`` seconds for all agents to finish
        executing their local workers (either successfully or not). This
        acts as a safety guard against user scripts that terminate at different
        times. The barrier keeps the agent process alive until all agents have
        finished (or the timeout expires).
"""
log.info(
f"Local worker group finished ({self._worker_group.state}). "
f"Waiting {self._exit_barrier_timeout} seconds for other agents to finish"
)
start = time.time()
try:
store_util.barrier(
self._store,
self._worker_group.group_rank,
self._worker_group.group_world_size,
key_prefix=_TERMINAL_STATE_SYNC_ID,
barrier_timeout=self._exit_barrier_timeout,
)
log.info(
f"Done waiting for other agents. Elapsed: {time.time() - start} seconds"
)
except SignalException as e:
log.warn(f"Got termination signal: {e.sigval}")
raise
except Exception:
log.exception(
f"Error waiting on exit barrier. Elapsed: {time.time() - start} seconds"
)
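# Illustrative sketch (not part of the original module): a standalone version
# of the prefix-sum offset computation performed by
# ``SimpleElasticAgent._get_ranks``/``_assign_worker_ranks``. Given the
# ``local_world_size`` of every agent (indexed by group rank), the workers of
# agent ``group_rank`` receive the contiguous global ranks [offset, offset + n).
def _example_global_ranks(local_world_sizes: List[int], group_rank: int) -> Tuple[int, List[int]]:
    offset = sum(local_world_sizes[:group_rank])
    world_size = sum(local_world_sizes)
    n = local_world_sizes[group_rank]
    return world_size, list(range(offset, offset + n))


# For example, with agents of sizes [2, 4, 2], the agent at group_rank=1 gets
# global ranks [2, 3, 4, 5] out of a world size of 8.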
| pytorch-master | torch/distributed/elastic/agent/server/api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import signal
import tempfile
from typing import Any, Dict, Optional, Tuple
from torch.distributed.elastic.agent.server.api import (
RunResult,
SimpleElasticAgent,
WorkerGroup,
WorkerSpec,
WorkerState,
)
from torch.distributed.elastic.metrics.api import prof
from torch.distributed.elastic.multiprocessing import PContext, start_processes
from torch.distributed.elastic.utils import macros
from torch.distributed.elastic.utils.logging import get_logger
log = get_logger()
__all__ = ['LocalElasticAgent']
class LocalElasticAgent(SimpleElasticAgent):
"""
An implementation of :py:class:`torchelastic.agent.server.ElasticAgent`
that handles host-local workers.
This agent is deployed per host and is configured to spawn ``n`` workers.
When using GPUs, ``n`` maps to the number of GPUs available on the host.
    The local agent does not communicate with other local agents deployed on
    other hosts, even though the workers may communicate inter-host. The worker id
    is interpreted to be a local process. The agent starts and stops all worker
processes as a single unit.
The worker function and argument passed to the worker function must be
python multiprocessing compatible. To pass multiprocessing data structures
to the workers you may create the data structure in the same multiprocessing
context as the specified ``start_method`` and pass it as a function argument.
The ``exit_barrier_timeout`` specifies the amount of time (in seconds) to wait
for other agents to finish. This acts as a safety net to handle cases where
workers finish at different times, to prevent agents from viewing workers
that finished early as a scale-down event. It is strongly advised that the
user code deal with ensuring that workers are terminated in a synchronous
manner rather than relying on the exit_barrier_timeout.
Example launching function
::
def trainer(args) -> str:
return "do train"
def main():
start_method="spawn"
shared_queue= multiprocessing.get_context(start_method).Queue()
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint=trainer,
args=("foobar",),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec, start_method)
results = agent.run()
if results.is_failed():
print("trainer failed")
else:
print(f"rank 0 return value: {results.return_values[0]}")
# prints -> rank 0 return value: do train
Example launching binary
::
def main():
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint="/usr/local/bin/trainer",
args=("--trainer_args", "foobar"),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec)
results = agent.run()
if not results.is_failed():
print("binary launches do not have return values")
"""
def __init__(
self,
spec: WorkerSpec,
start_method="spawn",
exit_barrier_timeout: float = 300,
log_dir: Optional[str] = None,
):
super().__init__(spec, exit_barrier_timeout)
self._start_method = start_method
self._pcontext: Optional[PContext] = None
rdzv_run_id = spec.rdzv_handler.get_run_id()
self._log_dir = self._make_log_dir(log_dir, rdzv_run_id)
def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str):
base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_")
os.makedirs(base_log_dir, exist_ok=True)
dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir)
log.info(f"log directory set to: {dir}")
return dir
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _stop_workers(self, worker_group: WorkerGroup) -> None:
self._shutdown()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
spec = worker_group.spec
store = worker_group.store
assert store is not None
master_addr, master_port = super()._get_master_addr_port(store)
restart_count = spec.max_restarts - self._remaining_restarts
use_agent_store = spec.rdzv_handler.get_backend() == "static"
args: Dict[int, Tuple] = {}
envs: Dict[int, Dict[str, str]] = {}
for worker in worker_group.workers:
local_rank = worker.local_rank
worker_env = {
"LOCAL_RANK": str(local_rank),
"RANK": str(worker.global_rank),
"GROUP_RANK": str(worker_group.group_rank),
"ROLE_RANK": str(worker.role_rank),
"ROLE_NAME": spec.role,
"LOCAL_WORLD_SIZE": str(spec.local_world_size),
"WORLD_SIZE": str(worker.world_size),
"GROUP_WORLD_SIZE": str(worker_group.group_world_size),
"ROLE_WORLD_SIZE": str(worker.role_world_size),
"MASTER_ADDR": master_addr,
"MASTER_PORT": str(master_port),
"TORCHELASTIC_RESTART_COUNT": str(restart_count),
"TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
"TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
"TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
"NCCL_ASYNC_ERROR_HANDLING": os.getenv(
"NCCL_ASYNC_ERROR_HANDLING", str(1)
),
}
if "OMP_NUM_THREADS" in os.environ:
worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
envs[local_rank] = worker_env
worker_args = list(spec.args)
worker_args = macros.substitute(worker_args, str(local_rank))
args[local_rank] = tuple(worker_args)
# scaling events do not count towards restarts (gets same attempt #)
# remove existing log dir if this restart is due to a scaling event
attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
shutil.rmtree(attempt_log_dir, ignore_errors=True)
os.makedirs(attempt_log_dir)
assert spec.entrypoint is not None
self._pcontext = start_processes(
name=spec.role,
entrypoint=spec.entrypoint,
args=args,
envs=envs,
log_dir=attempt_log_dir,
start_method=self._start_method,
redirects=spec.redirects,
tee=spec.tee,
)
return self._pcontext.pids()
def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
if self._pcontext:
self._pcontext.close(death_sig)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
role = worker_group.spec.role
worker_pids = {w.id for w in worker_group.workers}
assert self._pcontext is not None
pc_pids = set(self._pcontext.pids().values())
if worker_pids != pc_pids:
log.error(
f"[{role}] worker pids do not match process_context pids."
f" Expected: {worker_pids}, actual: {pc_pids}"
)
return RunResult(state=WorkerState.UNKNOWN)
result = self._pcontext.wait(0)
if result:
if result.is_failed():
# map local rank failure to global rank
worker_failures = {}
for local_rank, failure in result.failures.items():
worker = worker_group.workers[local_rank]
worker_failures[worker.global_rank] = failure
return RunResult(
state=WorkerState.FAILED,
failures=worker_failures,
)
else:
# copy ret_val_queue into a map with a global ranks
workers_ret_vals = {}
for local_rank, ret_val in result.return_values.items():
worker = worker_group.workers[local_rank]
workers_ret_vals[worker.global_rank] = ret_val
return RunResult(
state=WorkerState.SUCCEEDED,
return_values=workers_ret_vals,
)
else:
return RunResult(state=WorkerState.HEALTHY)
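# Illustrative sketch (not part of the original module): a worker entrypoint is
# expected to read the environment variables that ``_start_workers`` populates
# above. The helper name below is hypothetical.
def _example_read_worker_env() -> Dict[str, int]:
    return {
        "local_rank": int(os.environ["LOCAL_RANK"]),
        "global_rank": int(os.environ["RANK"]),
        "world_size": int(os.environ["WORLD_SIZE"]),
        "group_rank": int(os.environ["GROUP_RANK"]),
    }


# MASTER_ADDR/MASTER_PORT are also populated, so a worker can typically
# initialize its process group via the default env:// init method without any
# extra wiring.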
| pytorch-master | torch/distributed/elastic/agent/server/local_elastic_agent.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict
_log_handlers: Dict[str, logging.Handler] = {
"console": logging.StreamHandler(),
"dynamic_rendezvous": logging.NullHandler(),
"null": logging.NullHandler(),
}
def get_logging_handler(destination: str = "null") -> logging.Handler:
global _log_handlers
return _log_handlers[destination]
| pytorch-master | torch/distributed/elastic/events/handlers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Module contains events processing mechanisms that are integrated with the standard python logging.
Example of usage:
::
from torch.distributed.elastic import events
event = events.Event(name="test_event", source=events.EventSource.WORKER, metadata={...})
events.get_logging_handler(destination="console").info(event)
"""
import inspect
import logging
import os
import socket
import traceback
from enum import Enum
from typing import Dict, Optional
from torch.distributed.elastic.events.handlers import get_logging_handler
from .api import ( # noqa: F401
Event,
EventMetadataValue,
EventSource,
NodeState,
RdzvEvent,
)
_events_loggers: Dict[str, logging.Logger] = {}
def _get_or_create_logger(destination: str = "null") -> logging.Logger:
"""
    Constructs a python logger for the given destination type, or returns the
    cached logger if one has already been created for that destination.
    Available destinations can be found in the ``handlers.py`` file.
The constructed logger does not propagate messages to the upper level loggers,
e.g. root logger. This makes sure that a single event can be processed once.
Args:
destination: The string representation of the event handler.
Available handlers found in ``handlers`` module
"""
global _events_loggers
if destination not in _events_loggers:
_events_logger = logging.getLogger(f"torchelastic-events-{destination}")
_events_logger.setLevel(os.environ.get("LOGLEVEL", "INFO"))
# Do not propagate message to the root logger
_events_logger.propagate = False
logging_handler = get_logging_handler(destination)
_events_logger.addHandler(logging_handler)
# Add the logger to the global dictionary
_events_loggers[destination] = _events_logger
return _events_loggers[destination]
def record(event: Event, destination: str = "null") -> None:
_get_or_create_logger(destination).info(event.serialize())
def record_rdzv_event(event: RdzvEvent) -> None:
_get_or_create_logger("dynamic_rendezvous").info(event.serialize())
def construct_and_record_rdzv_event(
run_id: str,
message: str,
node_state: NodeState,
name: str = "",
hostname: str = "",
pid: Optional[int] = None,
master_endpoint: str = "",
local_id: Optional[int] = None,
rank: Optional[int] = None,
) -> None:
# We don't want to perform an extra computation if not needed.
if isinstance(get_logging_handler("dynamic_rendezvous"), logging.NullHandler):
return
# Set up parameters.
if not hostname:
hostname = socket.getfqdn()
if not pid:
pid = os.getpid()
# Determines which file called this function.
callstack = inspect.stack()
filename = "no_file"
if len(callstack) > 1:
stack_depth_1 = callstack[1]
filename = os.path.basename(stack_depth_1.filename)
if not name:
name = stack_depth_1.function
# Delete the callstack variable. If kept, this can mess with python's
# garbage collector as we are holding on to stack frame information in
# the inspect module.
del callstack
# Set up error trace if this is an exception
if node_state == NodeState.FAILED:
error_trace = traceback.format_exc()
else:
error_trace = ""
# Initialize event object
event = RdzvEvent(
name=f"{filename}:{name}",
run_id=run_id,
message=message,
hostname=hostname,
pid=pid,
node_state=node_state,
master_endpoint=master_endpoint,
rank=rank,
local_id=local_id,
error_trace=error_trace,
)
# Finally, record the event.
record_rdzv_event(event)
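# Illustrative sketch (not part of the original module): records a custom event
# to the console handler. The event name and metadata below are made up.
def _example_record_console_event() -> None:
    event = Event(
        name="example_event",
        source=EventSource.WORKER,
        metadata={"global_rank": 0, "message": "hello"},
    )
    # "console" maps to a logging.StreamHandler in ``handlers.py``.
    record(event, destination="console")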
| pytorch-master | torch/distributed/elastic/events/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Dict, Union, Optional
__all__ = ['EventSource', 'Event', 'NodeState', 'RdzvEvent']
EventMetadataValue = Union[str, int, float, bool, None]
class EventSource(str, Enum):
"""
Known identifiers of the event producers.
"""
AGENT = "AGENT"
WORKER = "WORKER"
@dataclass
class Event:
"""
The class represents the generic event that occurs during the torchelastic
job execution. The event can be any kind of meaningful action.
Args:
name: event name.
source: the event producer, e.g. agent or worker
        timestamp: timestamp in milliseconds when the event occurred.
metadata: additional data that is associated with the event.
"""
name: str
source: EventSource
timestamp: int = 0
metadata: Dict[str, EventMetadataValue] = field(default_factory=dict)
def __str__(self):
return self.serialize()
@staticmethod
def deserialize(data: Union[str, "Event"]) -> "Event":
if isinstance(data, Event):
return data
if isinstance(data, str):
data_dict = json.loads(data)
data_dict["source"] = EventSource[data_dict["source"]]
return Event(**data_dict)
def serialize(self) -> str:
return json.dumps(asdict(self))
class NodeState(str, Enum):
"""
    The states that a node can be in during rendezvous.
"""
INIT = "INIT"
RUNNING = "RUNNING"
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
@dataclass
class RdzvEvent:
"""
Dataclass to represent any rendezvous event.
Args:
name: Event name. (E.g. Current action being performed)
run_id: The run id of the rendezvous
message: The message describing the event
hostname: Hostname of the node
pid: The process id of the node
node_state: The state of the node (INIT, RUNNING, SUCCEEDED, FAILED)
master_endpoint: The master endpoint for the rendezvous store, if known
rank: The rank of the node, if known
local_id: The local_id of the node, if defined in dynamic_rendezvous.py
error_trace: Error stack trace, if this is an error event.
"""
name: str
run_id: str
message: str
hostname: str
pid: int
node_state: NodeState
master_endpoint: str = ""
rank: Optional[int] = None
local_id: Optional[int] = None
error_trace: str = ""
def __str__(self):
return self.serialize()
@staticmethod
def deserialize(data: Union[str, "RdzvEvent"]) -> "RdzvEvent":
if isinstance(data, RdzvEvent):
return data
if isinstance(data, str):
data_dict = json.loads(data)
data_dict["node_state"] = NodeState[data_dict["node_state"]]
return RdzvEvent(**data_dict)
def serialize(self) -> str:
return json.dumps(asdict(self))
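# Illustrative sketch (not part of the original module): events are shipped
# around as JSON strings, so ``serialize()``/``deserialize()`` must round-trip.
def _example_event_round_trip() -> bool:
    original = Event(name="example", source=EventSource.AGENT, metadata={"rank": 0})
    restored = Event.deserialize(original.serialize())
    return restored == original  # True: dataclass equality over all fields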
| pytorch-master | torch/distributed/elastic/events/api.py |
import sys
import torch
def is_available():
return hasattr(torch._C, "_dist_autograd_init")
if is_available() and not torch._C._dist_autograd_init():
raise RuntimeError("Failed to initialize torch.distributed.autograd")
if is_available():
from torch._C._distributed_autograd import (
get_gradients,
backward,
_init,
_new_context,
_release_context,
_get_max_id,
_is_valid_context,
_retrieve_context,
_current_context,
_get_debug_info,
DistAutogradContext,
)
class context(object):
'''
Context object to wrap forward and backward passes when using
distributed autograd. The ``context_id`` generated in the ``with``
statement is required to uniquely identify a distributed backward pass
on all workers. Each worker stores metadata associated with this
``context_id``, which is required to correctly execute a distributed
autograd pass.
Example::
>>> import torch.distributed.autograd as dist_autograd
>>> # xdoctest: +SKIP
>>> with dist_autograd.context() as context_id:
>>> t1 = torch.rand((3, 3), requires_grad=True)
>>> t2 = torch.rand((3, 3), requires_grad=True)
>>> loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
>>> dist_autograd.backward(context_id, [loss])
'''
def __enter__(self):
self.autograd_context = _new_context()
return self.autograd_context._context_id()
def __exit__(self, type, value, traceback):
_release_context(self.autograd_context._context_id())
| pytorch-master | torch/distributed/autograd/__init__.py |
from .join import Join
from .join import Joinable
from .join import JoinHook
| pytorch-master | torch/distributed/algorithms/__init__.py |
import warnings
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Any, List, NamedTuple, Optional, Type
import torch
import torch.distributed as dist
__all__ = ['JoinHook', 'Joinable', 'Join']
class JoinHook():
r"""
This defines a join hook, which provides two entry points in the join
context manager: a main hook, which is called repeatedly while there exists
a non-joined process, and a post-hook, which is called once all processes
have joined.
To implement a join hook for the generic join context manager, define a
class that inherits from :class:`JoinHook` and override ``main_hook()`` and
``post_hook()`` as appropriate.
"""
def main_hook(self) -> None:
r"""
This hook is called repeatedly while there exists a non-joined process
to shadow collective communications in one training iteration (i.e. in
one forward pass, backward pass, and optimizer step).
"""
...
def post_hook(self, is_last_joiner: bool) -> None:
r"""
This hook is called after all processes have joined. It is passed an
additional ``bool`` argument ``is_last_joiner``, which indicates if the
rank is one of the last to join.
Arguments:
is_last_joiner (bool): ``True`` if the rank is one of the last to
join; ``False`` otherwise.
"""
...
class Joinable(ABC):
r"""
This defines an abstract base class for joinable classes. A joinable class
(inheriting from :class:`Joinable`) should implement :meth:`join_hook`,
which returns a :class:`JoinHook` instance, in addition to
:meth:`join_device` and :meth:`join_process_group` that return device and
process group information, respectively.
"""
@abstractmethod
def __init__(self):
super(Joinable, self).__init__()
self._join_config = _JoinConfig.construct_disabled_join_config()
@abstractmethod
def join_hook(self, **kwargs) -> JoinHook:
r"""
Returns a :class:`JoinHook` instance for the given :class:`Joinable`.
Arguments:
kwargs (dict): a :class:`dict` containing any keyword arguments
to modify the behavior of the join hook at run time; all
:class:`Joinable` instances sharing the same join context
manager are forwarded the same value for ``kwargs``.
"""
...
@property
@abstractmethod
def join_device(self) -> torch.device:
r"""
Returns the device from which to perform collective communications
needed by the join context manager implementation itself.
"""
...
@property
@abstractmethod
def join_process_group(self) -> Any:
r"""
Returns the process group for the collective communications needed by
the join context manager itself.
"""
...
class _JoinConfig(NamedTuple):
r"""
This includes all fields needed from a :class:`Joinable` instance for the
join context manager side.
"""
enable: bool
throw_on_early_termination: bool
is_first_joinable: bool
@staticmethod
def construct_disabled_join_config():
r"""
Returns a :class:`_JoinConfig` instance indicating that join-related
logic should be disabled, e.g. if the caller is not in a join context
manager.
"""
return _JoinConfig(
enable=False,
throw_on_early_termination=False,
is_first_joinable=False
)
class Join():
r"""
This class defines the generic join context manager, which allows custom
hooks to be called after a process joins. These hooks should shadow the
collective communications of non-joined processes to prevent hanging and
erroring and to ensure algorithmic correctness. Refer to :class:`JoinHook`
for details about the hook definition.
.. warning::
The context manager requires each participating :class:`Joinable` to
call the method :meth:`notify_join_context()` before its own per-
iteration collective communications to ensure correctness.
.. warning::
The context manager requires that all ``process_group`` attributes in
the :class:`JoinHook` objects are the same. If there are multiple
:class:`JoinHook` objects, then the ``device`` of the first is used.
        The process group and device information is used for checking for non-
        joined processes and for notifying processes to throw an exception if
        ``throw_on_early_termination`` is enabled, both of which use an all-
        reduce.
Arguments:
joinables (List[Joinable]): a list of the participating
:class:`Joinable` s; their hooks are iterated over in the given
order.
enable (bool): a flag enabling uneven input detection; setting to
``False`` disables the context manager's functionality and should
only be set when the user knows the inputs will not be uneven
(default: ``True``).
throw_on_early_termination (bool): a flag controlling whether to throw an
exception upon detecting uneven inputs (default: ``False``).
Example::
>>> import os
>>> import torch
>>> import torch.distributed as dist
>>> import torch.multiprocessing as mp
>>> # xdoctest: +SKIP
>>> import torch.nn.parallel.DistributedDataParallel as DDP
>>> import torch.distributed.optim.ZeroRedundancyOptimizer as ZeRO
>>> from torch.distributed.algorithms.join import Join
>>>
>>> # On each spawned worker
>>> def worker(rank):
>>> dist.init_process_group("nccl", rank=rank, world_size=2)
>>> model = DDP(torch.nn.Linear(1, 1).to(rank), device_ids=[rank])
>>> optim = ZeRO(model.parameters(), torch.optim.Adam, lr=0.01)
>>> # Rank 1 gets one more input than rank 0
>>> inputs = [torch.tensor([1.]).to(rank) for _ in range(10 + rank)]
>>> with Join([model, optim]):
>>> for input in inputs:
>>> loss = model(input).sum()
>>> loss.backward()
>>> optim.step()
>>> # All ranks reach here without hanging/erroring
"""
def __init__(
self,
joinables: List[Joinable],
enable: bool = True,
throw_on_early_termination: bool = False,
**kwargs,
):
if len(joinables) == 0:
raise ValueError("The join context manager requires at least one joinable")
self._joinables = joinables
self._join_hooks = [joinable.join_hook(**kwargs) for joinable in self._joinables]
self._enable = enable
self._throw_on_early_termination = throw_on_early_termination
self._set_joinable_configs()
self._extract_dist_info()
def _set_joinable_configs(self) -> None:
r"""
Sets the :class:`_JoinConfig` of each participating :class:`Joinable`.
"""
assert len(self._joinables) > 0
is_first_joinable = True
for joinable in self._joinables:
joinable._join_config = _JoinConfig(
enable=self._enable,
throw_on_early_termination=self._throw_on_early_termination,
is_first_joinable=is_first_joinable
)
is_first_joinable = False
def _extract_dist_info(self) -> None:
r"""
Extracts the process group and device information from the joinables.
If there are multiple joinables, then the context manager uses the
first specified device.
Preconditions:
``self._joinables`` is not ``None`` and is non-empty.
Raises:
ValueError
If there are multiple conflicting ``process_group`` attributes
among the ``Joinable`` objects.
"""
process_group = None
device = None
for joinable in self._joinables:
if process_group is None:
process_group = joinable.join_process_group
elif process_group != joinable.join_process_group:
raise ValueError("Using join context manager with multiple process groups")
if device is None:
device = joinable.join_device
self._process_group = process_group
self._rank = dist.get_rank(self._process_group)
self._device = device
def __enter__(self):
...
def __exit__(
self,
type: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType]
):
r"""
Repeatedly runs the main hooks until all processes join; then, runs
the post-hooks.
Raises:
RuntimeError
If ``throw_on_early_termination=True``.
"""
if not self._enable or type:
return # propagate the exception directly if one was raised
all_procs_joined = False
is_last_joiner = True
i = 0
WARN_THRESHOLD = 1000
warnings.simplefilter("once")
while not all_procs_joined:
if i > WARN_THRESHOLD:
warnings.warn(
"Detected uneven input skew of greater than "
f"{WARN_THRESHOLD}. This means that rank "
f"{self._rank} has at least {WARN_THRESHOLD} "
f"fewer inputs than other currently-active ranks. "
"This level of skew could lead to performance "
"degradation during training."
)
# Shadow the all-reduce in non-joined processes
num_nonjoined_procs = self._get_num_nonjoined_procs()
if num_nonjoined_procs == 0:
all_procs_joined = True
else:
if self._throw_on_early_termination:
self._notify_procs_to_terminate()
# Run main hooks
for join_hook in self._join_hooks:
join_hook.main_hook()
is_last_joiner = False
i += 1
# Run post-hooks
for join_hook in self._join_hooks:
join_hook.post_hook(is_last_joiner)
def _get_num_nonjoined_procs(self):
r"""
Returns the number of non-joined processes by shadowing an all-reduce
in the non-joined processes.
"""
num_nonjoined_procs = torch.zeros(1, device=self._device)
dist.all_reduce(num_nonjoined_procs, group=self._process_group)
return num_nonjoined_procs.item()
def _notify_procs_to_terminate(self):
r"""
Schedules an all-reduce to notify non-joined processes to terminate
and raises a ``RuntimeError`` indicating that the current process has
exhausted its inputs.
"""
ones = torch.ones(1, device=self._device)
dist.all_reduce(ones, group=self._process_group)
raise RuntimeError(f"Rank {self._rank} exhausted all inputs.")
@staticmethod
def notify_join_context(joinable: Joinable):
r"""
Notifies the join context manager that the calling process has not yet
joined; then, if ``throw_on_early_termination=True``, checks if uneven
inputs have been detected (i.e. if one process has already joined) and
throws an exception if so.
This method should be called from a :class:`Joinable` object before
its per-iteration collective communications. For example, this should
be called at the beginning of the forward pass in
:class:`DistributedDataParallel`.
Only the first :class:`Joinable` object passed into the context
manager performs the collective communications in this method, and
for the others, this method is vacuous.
Arguments:
joinable (Joinable): the :class:`Joinable` object calling this
method.
Returns:
An async work handle for the all-reduce meant to notify the context
manager that the process has not yet joined if ``joinable`` is the
first one passed into the context manager; ``None`` otherwise.
"""
assert hasattr(joinable, "_join_config"), \
f"Check that the {type(joinable)} constructor calls the " \
"``Joinable`` constructor"
join_config = joinable._join_config
# First joinable is responsible for the collective communications
if not join_config.is_first_joinable or not join_config.enable:
return None
device = joinable.join_device
process_group = joinable.join_process_group
# Schedule an all-reduce to indicate that the caller has not yet joined
ones = torch.ones(1, device=device)
work = dist.all_reduce(ones, group=process_group, async_op=True)
if join_config.throw_on_early_termination:
# Check if uneven inputs have been detected
zeros = torch.zeros(1, device=device)
dist.all_reduce(zeros, group=process_group)
should_throw = zeros.item()
if should_throw:
raise RuntimeError(
"Detected at least one rank that exhausted inputs. "
"Throwing across all ranks."
)
return work
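# Illustrative sketch (not part of the original module): a minimal ``Joinable``
# whose per-iteration collective is a single all-reduce of a counter, together
# with a ``JoinHook`` that shadows that all-reduce once this rank has exhausted
# its inputs. This is a toy example; the class names below are made up.
class _ExampleCounter(Joinable):
    def __init__(self, device: torch.device, process_group: Any):
        super().__init__()
        self._device = device
        self._process_group = process_group
        self.total = torch.zeros(1, device=device)

    def __call__(self, batch_size: int) -> None:
        # Per-iteration work: notify the join context, then all-reduce.
        Join.notify_join_context(self)
        t = torch.tensor([float(batch_size)], device=self._device)
        dist.all_reduce(t, group=self._process_group)
        self.total += t

    def join_hook(self, **kwargs) -> JoinHook:
        return _ExampleCounterJoinHook(self)

    @property
    def join_device(self) -> torch.device:
        return self._device

    @property
    def join_process_group(self) -> Any:
        return self._process_group


class _ExampleCounterJoinHook(JoinHook):
    def __init__(self, counter: "_ExampleCounter"):
        self._counter = counter

    def main_hook(self) -> None:
        # Shadow the non-joined ranks' all-reduce with a zero contribution.
        t = torch.zeros(1, device=self._counter._device)
        dist.all_reduce(t, group=self._counter._process_group)
        self._counter.total += t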
| pytorch-master | torch/distributed/algorithms/join.py |
from . import default_hooks as default
LOW_PRECISION_HOOKS = [
default.fp16_compress_hook,
default.bf16_compress_hook,
]
| pytorch-master | torch/distributed/algorithms/_comm_hooks/__init__.py |
import functools
import torch
import torch.distributed as dist
from torch.distributed import distributed_c10d
class DefaultState(object):
r"""
Stores state needed to perform the default ``all_reduce`` algorithm
within a communication hook.
Args:
process_group (ProcessGroup): The process group to be used for all-reduce.
"""
__slots__ = [
"process_group",
"world_size",
"gradient_predivide_factor",
"gradient_postdivide_factor"
]
def __init__(
self,
process_group
):
self.process_group = process_group if process_group is not None else distributed_c10d._get_default_group()
self.world_size = dist.get_world_size(process_group)
self.gradient_predivide_factor = self._get_gradient_predivide_factor(
self.world_size
)
self.gradient_postdivide_factor = self.world_size / self.gradient_predivide_factor
# setting two factors `self.gradient_predivide_factor`
# and `self.gradient_postdivide_factor` to avoid underflow and overflow
def _get_gradient_predivide_factor(self, world_size: int) -> float:
factor: int = 1
while world_size % factor == 0 and world_size / factor > factor:
factor *= 2
return float(factor)
class LowPrecisionState(DefaultState):
r"""
Stores state needed to perform gradient communication in a lower precision
within a communication hook. Communication hook will cast gradients back
to the original parameter precision specified by ``parameter_type`` (default: torch.float32).
Builds on top of the :class:`DefaultState`.
Args:
parameter_type (torch.dtype): The precision of model's parameters.
Required for a hook to cast gradients back to a parameter's precision.
"""
__slots__ = [
"parameter_type",
]
def __init__(
self,
process_group,
parameter_type=torch.float32,
):
super().__init__(process_group)
self.parameter_type = parameter_type
def _decompress(state: LowPrecisionState, grad: torch.Tensor):
"""
Casts gradients back to full parameter precision so that
further computation happens in full precision
"""
orig_grad_data = grad.data
grad.data = grad.data.to(state.parameter_type)
# Don't let this memory get reused until after the transfer.
orig_grad_data.record_stream(torch.cuda.current_stream()) # type: ignore[arg-type]
def allreduce_hook(state: DefaultState, grad: torch.Tensor):
r"""
This FSDP communication hook implements ``all_reduce`` algorithm
and a necessary pre- and post-division of gradients.
Args:
state (DefaultState): State information, configures pre- and post-division factors
grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks.
"""
if state.gradient_predivide_factor > 1:
grad.div_(state.gradient_predivide_factor)
dist.all_reduce(grad, group=state.process_group)
if state.gradient_postdivide_factor > 1:
grad.div_(state.gradient_postdivide_factor)
def lower_precision_hook(prec: torch.dtype, state: LowPrecisionState, grad: torch.Tensor):
grad.data = grad.data.to(prec)
allreduce_hook(state, grad)
_decompress(state, grad)
def fp16_compress_hook(state: LowPrecisionState, grad: torch.Tensor):
r"""
This FSDP communication hook implements a simple gradient compression
approach that casts ``grad`` to half-precision floating-point format (``torch.float16``).
    It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a
    ``state.gradient_predivide_factor``, and after an all-reduce step gradients are further divided by a
    ``state.gradient_postdivide_factor``. Once post-division is done, compressed gradients are cast back to the parameters' precision.
Args:
state (DefaultState): State information, configures pre- and post-division factors
grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision.
"""
fp16_hook = functools.partial(lower_precision_hook, torch.float16)
return fp16_hook(state, grad)
def bf16_compress_hook(state: LowPrecisionState, grad: torch.Tensor):
r"""
    This FSDP communication hook implements a simple gradient compression
    approach that casts ``grad`` to the Brain floating-point format (``torch.bfloat16``).
    It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a
    ``state.gradient_predivide_factor``, and after an all-reduce step gradients are further divided by a
    ``state.gradient_postdivide_factor``. Once post-division is done, compressed gradients are cast back to the parameters' precision.
Args:
state (DefaultState): State information, configures pre- and post-division factors
grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision.
"""
bf16_hook = functools.partial(lower_precision_hook, torch.bfloat16)
return bf16_hook(state, grad)
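# Illustrative sketch (not part of the original module): shows how one of the
# hooks above could be invoked directly on a local gradient shard. It assumes a
# default process group has already been initialized (e.g. via
# ``dist.init_process_group``); in practice FSDP invokes the hook internally.
def _example_apply_allreduce_hook(grad: torch.Tensor) -> torch.Tensor:
    state = DefaultState(process_group=None)  # None falls back to the default group
    allreduce_hook(state, grad)  # averages ``grad`` in place across ranks
    return grad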
| pytorch-master | torch/distributed/algorithms/_comm_hooks/default_hooks.py |
from enum import Enum, auto
from contextlib import suppress
import torch
from torch.autograd.graph import save_on_cpu
from torch.utils.checkpoint import checkpoint
from torch.distributed.utils import _replace_by_prefix
import torch.nn as nn
from typing import Any, Dict, Iterator, Tuple
from functools import partial
_CHECKPOINT_PREFIX = "_checkpoint_wrapped_module"
class CheckpointImpl(Enum):
REENTRANT = auto()
NO_REENTRANT = auto()
class CheckpointWrapper(torch.nn.Module):
"""
An nn.Module that wraps another nn.Module with checkpointing.
"""
def __init__(
self,
mod: torch.nn.Module,
checkpoint_impl: CheckpointImpl = CheckpointImpl.REENTRANT,
offload_to_cpu: bool = False,
checkpoint_fn=None,
*checkpoint_fn_args,
**checkpoint_fn_kwargs,
):
super().__init__()
self._checkpoint_wrapped_module = mod
self.checkpoint_impl = checkpoint_impl
self.offload_to_cpu = offload_to_cpu
if checkpoint_fn is None:
# use torch.utils.checkpoint
self.checkpoint_fn = partial(
checkpoint,
use_reentrant=(
self.checkpoint_impl == CheckpointImpl.REENTRANT
),
)
else:
self.checkpoint_fn = partial(
checkpoint_fn,
*checkpoint_fn_args,
**checkpoint_fn_kwargs,
)
# state_dict post hook to remove prefix to allow loading into a
# non-checkpoint wrapped module.
self._register_state_dict_hook(self._post_state_dict_hook)
# load_state_dict pre-hook to allow loading back into
# checkpoint-wrapped module.
self._register_load_state_dict_pre_hook(
self._pre_load_state_dict_hook, with_module=True
)
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self._checkpoint_wrapped_module, name)
def __getitem__(self, key: int) -> Any:
"""Forward indexing calls in case the module is a nn.Sequential."""
return self._checkpoint_wrapped_module.__getitem__(key) # type: ignore[operator]
def forward(self, *args, **kwargs):
offload_mgr = save_on_cpu(pin_memory=True) if self.offload_to_cpu else suppress()
with offload_mgr: # type: ignore[attr-defined]
return self.checkpoint_fn(
self._checkpoint_wrapped_module,
*args,
**kwargs
)
def named_parameters(
self,
*args,
**kwargs,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
"""
Overrides :meth:`named_parameters()` to intercept parameter names and
remove all occurrences of _CHECKPOINT_PREFIX.
"""
for param_name, param in super().named_parameters(*args, **kwargs):
yield param_name.replace(f"{_CHECKPOINT_PREFIX}.", ""), param
@staticmethod
def _post_state_dict_hook(
module: nn.Module,
state_dict: Dict[str, Any],
prefix: str,
*args: Any,
) -> Dict[str, Any]:
"""
_post_state_dict_hook() is called after the state_dict() of this
module is executed. For ``checkpoint_wrapper``, it will strip the
checkpoint-wrapped module prefix so that this module can be loaded into
non-checkpointed modules. It can still be loaded into
checkpoint-wrapped modules as this class adds the prefix back before
loading the state_dict.
"""
_replace_by_prefix(state_dict, f"{prefix}{_CHECKPOINT_PREFIX}.", prefix)
return state_dict
@staticmethod
def _pre_load_state_dict_hook(
module: nn.Module,
state_dict: Dict[str, Any],
prefix: str,
*args: Any,
) -> None:
"""
``_pre_load_state_dict_hook`` is called before ``self._load_from_state_dict()``
is called. For ``checkpoint_wrapper``, it will add back the module
prefix so that non-checkpointed modules can be loaded into
checkpoint_wrapper modules properly.
"""
_replace_by_prefix(state_dict, prefix, prefix + f"{_CHECKPOINT_PREFIX}.")
def checkpoint_wrapper(
module: torch.nn.Module,
checkpoint_impl: CheckpointImpl = CheckpointImpl.REENTRANT,
offload_to_cpu: bool = False,
checkpoint_fn=None,
*checkpoint_fn_args,
**checkpoint_fn_kwargs,
) -> torch.nn.Module:
"""
A convenience wrapper for activation checkpointing. If the module is wrapped
with this function, all subsequent calls to the module will automatically
perform checkpointing without the user having to explicitly call ``checkpoint``
function.
Usage::
checkpointed_module = checkpoint_wrapper(module)
outputs = checkpointed_module(inputs)
Args:
module (nn.Module):
The module to be wrapped
checkpoint_impl (Optional[CheckpointImpl]):
The checkpointing implementation to use. Currently only
CheckpointImpl.REENTRANT is supported. Note that this will only
be passed into the ``torch.utils.checkpoint.checkpoint``
implementation, and is ignored if a custom ``checkpoint_fn`` is
specified.
offload_to_cpu (Optional[bool]):
Whether to offload outer activations to CPU. Note that this
currently only works with CheckpointImpl.REENTRANT.
checkpoint_fn (Optional[Callable]):
Functional checkpoint implementation to use. If this is specified,
it will be used over the default ``torch.utils.checkpoint.checkpoint``
implementation and the `checkpoint_impl` argument will be ignored.
*checkpoint_fn_args: (Sequence[Any]): Arguments to pass into `checkpoint_fn`.
**checkpoint_fn_kwargs: (Dict[str, Any]): Keyword arguments to pass into `checkpoint_fn`.
Returns:
(nn.Module):
Wrapped module
"""
return CheckpointWrapper(
module, checkpoint_impl, offload_to_cpu, checkpoint_fn, *checkpoint_fn_args, **checkpoint_fn_kwargs
)
def apply_activation_checkpointing_wrapper(
model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=lambda _: True
):
"""
Applies :func:`checkpoint_wrapper` to modules within `model` based on a user-defined
configuration. For each module within `model`, the `check_fn` is used to decide
whether `module` should be wrapped with :func:`checkpoint_wrapper` or not.
Note::
This function modifies `model` in place and replaces appropriate layers with
their checkpoint-wrapped modules.
Note::
This function will not wrap the overall root module. If this is needed, please directly use
:class:`CheckpointWrapper`.
Usage::
model = nn.Sequential(
nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10)
)
check_fn = lambda l: isinstance(l, nn.Linear)
apply_activation_checkpointing_wrapper(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)
Args:
model (nn.Module):
The model whose submodules (or itself) should be wrapped with activation checkpointing.
checkpoint_wrapper_fn (Optional[Callable[[nn.Module], nn.Module]]):
A ``Callable`` which will wrap modules.
check_fn (Optional[Callable[[nn.Module], bool]]):
A lambda function which is passed the current layer and returns
``True`` or ``False`` depending on whether the input layer should be wrapped.
Returns: None (`model` is modified in place)
"""
# TODO: Importing inside function to avoid circular import issue between FSDP and
# checkpoint_wrapper. This can be resolved once wrap() APIs are decoupled from FSDP code.
from torch.distributed.fsdp.wrap import _recursive_wrap, lambda_auto_wrap_policy
return _recursive_wrap(
module=model,
auto_wrap_policy=partial(lambda_auto_wrap_policy, lambda_fn=check_fn),
wrapper_cls=checkpoint_wrapper_fn,
ignored_modules=set(),
ignored_params=set(),
only_wrap_children=True
)
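# Illustrative sketch (not part of the original module): a minimal CPU-only usage
# example of the wrapper above. It assumes no distributed setup; the helper name
# below is hypothetical and is never called at import time.
def _demo_checkpoint_wrapper():
    torch.manual_seed(0)
    block = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8))
    wrapped = checkpoint_wrapper(block)
    # The input must require grad so the reentrant checkpoint recomputes the graph
    # in backward and produces parameter gradients.
    x = torch.randn(4, 8, requires_grad=True)
    wrapped(x).sum().backward()
    # ``named_parameters`` strips the checkpoint prefix from parameter names.
    assert all(_CHECKPOINT_PREFIX not in name for name, _ in wrapped.named_parameters())
    assert all(p.grad is not None for p in wrapped.parameters())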
| pytorch-master | torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py |
pytorch-master | torch/distributed/algorithms/_checkpoint/__init__.py |
|
pytorch-master | torch/distributed/algorithms/model_averaging/__init__.py |
|
import warnings
from abc import ABC, abstractmethod
from typing import Union, Iterable, Dict
import torch
import torch.distributed as dist
import torch.distributed.algorithms.model_averaging.utils as utils
__all__ = ['ModelAverager', 'PeriodicModelAverager']
class ModelAverager(ABC):
r"""Base class for all model averagers.
Args:
process_group: The process group to be used for all-reduce.
If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
"""
def __init__(self, process_group=None):
self.process_group = (
process_group if process_group is not None else dist.group.WORLD
)
self.step = 0
@abstractmethod
def average_parameters(self, params):
raise NotImplementedError
class PeriodicModelAverager(ModelAverager):
r"""
Averages parameters periodically after the warm-up stage.
This can be used for running `post-local SGD <https://arxiv.org/abs/1808.07217>`_,
by running :class:`~torch.nn.DistributedDataParallel` (DDP)
using the subgroups created by :meth:`~torch.distributed.new_subgroups`.
Args:
period (int): The number of steps per model averaging.
Usually the period should be greater than ``1`` to reduce the communication cost.
Otherwise, only DDP needs to be used.
warmup_steps (int): The number of warm-up steps. During this stage,
model averaging is skipped.
process_group: The process group to be used for all-reduce.
If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>>
>>> dist.init_process_group("nccl", rank=rank, world_size=16)
>>> torch.cuda.set_device(rank)
>>> module = nn.Linear(1, 1, bias=False).cuda()
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>> # Register a post-localSGD communication hook.
>>> state = post_localSGD.PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD.post_localSGD_hook)
>>>
>>> # In the first 100 steps, run global gradient averaging like normal DDP at every step.
>>> # After 100 steps, run model averaging every 4 steps.
>>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> averager = averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> for step in range(0, 200):
>>> optimizer.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> optimizer.step()
>>> # Will average model parameters globally every 4 steps. Thus,
>>> # inter-node communication only occurs every 4 iterations after
>>> # the initial ``warmup_steps`` period.
>>> averager.average_parameters(model.parameters())
"""
def __init__(
self,
period,
warmup_steps=0,
process_group=None
):
super().__init__(process_group)
if warmup_steps < 0:
raise ValueError("Arg ``warmup_steps`` must be a non-negative number.")
self.warmup_steps = warmup_steps
if period < 1:
raise ValueError("Arg ``period`` must be a positive value.")
elif period == 1:
warnings.warn(
"When period is 1, no need to use model averaging because the communication cost "
"of all-reducing parameters will be no less than the cost of all-reducing gradients "
"by DistributedDataParallel in the backward pass. Therefore, only "
"DistributedDataParallel should be used for this case."
)
self.period = period
def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]):
"""
Averages parameters or parameter groups of an optimizer if ``step`` is no less than ``warmup_steps``
and ``step - warmup_steps`` is divisible by ``period``, where ``step`` is increased by 1
at each iteration in the training loop.
Args:
params: The parameters of a model or parameter groups of an optimizer.
"""
if (
self.step >= self.warmup_steps
and (self.step - self.warmup_steps) % self.period == 0
):
utils.average_parameters_or_parameter_groups(params, self.process_group)
self.step += 1
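# Illustrative sketch (not part of the original module): the gating rule used by
# ``PeriodicModelAverager.average_parameters`` reproduced without a process group,
# to show at which steps averaging would actually run. The helper is hypothetical.
def _demo_averaging_schedule(warmup_steps=100, period=4, total_steps=120):
    return [
        step
        for step in range(total_steps)
        if step >= warmup_steps and (step - warmup_steps) % period == 0
    ]
# With the defaults above, averaging runs at steps 100, 104, 108, 112, and 116;
# every other step only performs the regular DDP gradient allreduce.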
| pytorch-master | torch/distributed/algorithms/model_averaging/averagers.py |
# flake8: noqa C101
import itertools
from typing import Union, Iterable, Dict, Iterator
import torch
import torch.distributed as dist
# The two imports below are not always available depending on the
# USE_DISTRIBUTED compile flag. Make sure they raise import error
# if we're trying to use them.
from torch.distributed import ProcessGroup, group
def average_parameters(
params: Iterator[torch.nn.Parameter], process_group: ProcessGroup
):
"""
Averages all the given parameters.
For allreduce efficiency, all the parameters are flattened into a contiguous buffer.
Thus, it requires extra memory of the same size as the given parameters.
"""
group_to_use = process_group if process_group is not None else group.WORLD
# Do not update any parameter if not in the process group.
if dist._rank_not_in_group(group_to_use):
return
params_it1, params_it2 = itertools.tee(params)
# If the input parameters have different data types,
# packing these parameters will trigger an implicit type up-casting.
# The original parameter data types will be restored during the subsequent unpacking.
flat_params = torch.cat([p.data.reshape(-1) for p in params_it1])
flat_params /= dist.get_world_size(group_to_use)
# Make sure the allreduce will not conflict with any other ongoing process group.
if torch.cuda.is_available():
torch.cuda.synchronize()
dist.all_reduce(flat_params, group=group_to_use)
offset = 0
for p in params_it2:
p.data = flat_params[offset : offset + p.numel()].view_as(p).type_as(p)
offset += p.numel()
def get_params_to_average(params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]):
"""
Returns a list of parameters that need to be averaged, filtering out the parameters that do not have any gradients.
Args:
params: The parameters of a model or parameter groups of an optimizer.
"""
filtered_params = []
for param in params:
if isinstance(param, torch.nn.Parameter):
# model.parameters() input
param_data = param
if param_data.grad is not None:
filtered_params.append(param_data)
elif isinstance(param, dict):
# optimizer.param_groups input
for param_data in param["params"]:
if param_data.grad is not None:
filtered_params.append(param_data)
else:
raise NotImplementedError(f"Parameter input of type {type(param)} is not supported")
return filtered_params
def average_parameters_or_parameter_groups(params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]], process_group: ProcessGroup):
"""
Averages parameters of a model or parameter groups of an optimizer.
"""
average_parameters(iter(get_params_to_average(params)), process_group)
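# Illustrative sketch (not part of the original module): the flatten/average/unflatten
# mechanics of ``average_parameters`` with the all-reduce replaced by a local mean over
# two simulated replicas, so no process group initialization is needed. Hypothetical helper.
def _demo_flatten_and_average():
    replica_a = [torch.nn.Parameter(torch.ones(3)), torch.nn.Parameter(torch.full((2,), 3.0))]
    replica_b = [torch.nn.Parameter(torch.zeros(3)), torch.nn.Parameter(torch.ones(2))]
    flat_a = torch.cat([p.data.reshape(-1) for p in replica_a])
    flat_b = torch.cat([p.data.reshape(-1) for p in replica_b])
    # Stands in for ``dist.all_reduce`` followed by the division by the world size.
    flat_avg = (flat_a + flat_b) / 2
    offset = 0
    for p in replica_a:
        p.data = flat_avg[offset: offset + p.numel()].view_as(p).type_as(p)
        offset += p.numel()
    # The first parameter becomes all 0.5 and the second all 2.0.
    return replica_a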
| pytorch-master | torch/distributed/algorithms/model_averaging/utils.py |
# Copyright 2022 Cruise LLC
import logging
import warnings
from collections import OrderedDict
from typing import Union, Iterable, Dict
import torch
import torch.distributed as dist
import torch.distributed.algorithms.model_averaging.averagers as averagers
import torch.distributed.algorithms.model_averaging.utils as utils
logger = logging.getLogger(__name__)
class HierarchicalModelAverager(averagers.ModelAverager):
r"""
Runs hierarchical model averaging (`hierarchical SGD <https://arxiv.org/pdf/2010.12998.pdf>`_).
Process groups of different sizes are organized in a hierarchy, and they average parameters
by using different periods concurrently after the warm-up stage.
This is an extension of :class:`~torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager`
that supports `post-local SGD <https://arxiv.org/abs/1808.07217>`_, which essentially only supports
a two-level hierarchy: the intra-machine level and the global level, where the intra-machine
level is usually embedded in :meth:`~torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook`.
Similarly, the process groups within this class do not have such an intra-machine process
subgroup, which should be embedded by the post-local SGD communication hook instead.
Args:
period_group_size_dict: An ordered dict mapping keys of model averaging period to
process group size, used for initializing process groups of
different sizes in a hierarchy to average parameters concurrently.
In particular, at each iteration, there will be at most a single
process group that runs averaging -- the group whose period is the
largest period that the current step can be divided by.
For example, if the dict has three keys: 2, 4, and 8,
then in total three process groups will be created to
average parameters every 2, 4, and 8 iterations, respectively.
At the 4th iteration, only the second process group will run
averaging, because the first process group should be a
subset of the second process group, so there is no need to run the first
process group redundantly.
On the other hand, the third process group can only be triggered
every 8 iterations, so it will not be triggered at the 4th iteration.
warmup_steps (int): The number of warm-up steps. During this stage, model averaging is skipped.
process_group (ProcessGroup, optional): The overall process group containing all the processes that run model averaging.
If ``None``, the default process group, which is created
by :func:`torch.distributed.init_process_group`, will be used.
(default: ``None``)
Example::
>>> # xdoctest: +SKIP('undefined rank')
>>> from collections import OrderedDict
>>> import torch
>>> import torch.distributed as dist
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>> import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD
>>> import torch.nn as nn
>>>
>>> dist.init_process_group("nccl", rank=rank, world_size=16)
>>> torch.cuda.set_device(rank)
>>> module = nn.Linear(1, 1, bias=False).to(rank)
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>> # Register a post-localSGD communication hook.
>>> # Assume that each machine has 4 GPUs, then each intra-machine subgroup has a size of 4.
>>> subgroup, _ = dist.new_subgroups()
>>> state = PostLocalSGDState(subgroup=subgroup, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Average parameters among each group of 8 processes every 4 iterations, and among all
>>> # the 16 processes every 16 iterations.
>>> averager = hierarchicalSGD.HierarchicalModelAverager(
>>> period_group_size_dict=OrderedDict([(4, 8), (16, 16)]), warmup_steps=100)
>>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> # In the first 100 steps, run global gradient averaging like normal DDP at every step.
>>> # After 100 steps, run model averaging at two levels.
>>> for step in range(0, 200):
>>> optimizer.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> optimizer.step()
>>> # Average parameters after ``optimizer.step()``.
>>> # Thus, the inter-node communication only occurs periodically after ``warmup_steps``.
>>> averager.average_parameters(model.parameters())
.. warning ::
The last group size in the dict must be the size of the provided ``process_group``,
which indicates model averaging at the highest level of the hierarchy.
If ``process_group`` is not provided, then the last group size should be equal to the world size.
.. warning ::
`HierarchicalModelAverager` is experimental and subject to change.
"""
def __init__(self, period_group_size_dict=None, warmup_steps=0, process_group=None):
super().__init__(process_group)
if not period_group_size_dict:
raise ValueError("Arg ``period_group_size_dict`` must not be empty.")
self._periods = list(period_group_size_dict.keys())
if self._periods[0] <= 0:
raise ValueError("The minimum period in arg ``period_group_size_dict`` must be a positive value.")
elif self._periods[-1] == 1:
warnings.warn(
"When the maximum period in arg ``period_group_size_dict`` is 1, "
"no need to use model averaging because the communication cost "
"of all-reducing parameters will be no less than the cost of all-reducing gradients "
"by DistributedDataParallel in the backward pass. Therefore, only "
"DistributedDataParallel should be used for this case."
)
overall_group_size = dist.get_world_size(group=self.process_group)
if list(period_group_size_dict.values())[-1] != overall_group_size:
raise ValueError(
f"The last value in arg ``period_process_group_dict`` {list(period_group_size_dict.values())[-1]} "
f"must be equal to the size of arg ``process_group`` {overall_group_size}."
)
self.period_process_group_dict = OrderedDict()
logger.info("Model averaging hierarchy:")
for period, group_size in period_group_size_dict.items():
logger.info(
f"\tEach group that has {group_size} processes average parameters every {period} iterations, "
"if no higher-level averaging.")
if group_size != overall_group_size:
self.period_process_group_dict[period], _ = dist.new_subgroups(
group_size=group_size, group=self.process_group)
else:
self.period_process_group_dict[period] = self.process_group
if warmup_steps < 0:
raise ValueError("Arg ``warmup_steps`` must be a non-negative number.")
self.warmup_steps = warmup_steps
def _find_process_group(self):
"""
Returns a process group as the value of a ``period_process_group_dict`` entry,
if ``step`` can be divided by a period in the keys of ``period_process_group_dict``.
If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``,
then the returned process group is the one corresponding to the largest period,
since this process group will be used for averaging parameters at this ``step``.
Returns ``None`` if not found.
"""
for period in reversed(self._periods):
if self.step % period == 0:
return self.period_process_group_dict[period]
return None
def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]):
"""
Averages parameters or parameter groups of an optimizer if ``step`` is no less than ``warmup_steps``
and it can be divided by a period in the keys of ``period_process_group_dict``,
where ``step`` is increased by 1 at each iteration in the training loop.
If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``,
only the largest period is used, and the corresponding process group is used for averaging parameters.
Args:
params: The parameters of a model or parameter groups of an optimizer.
"""
if self.step >= self.warmup_steps:
group = self._find_process_group()
if group is not None:
utils.average_parameters_or_parameter_groups(params, group)
self.step += 1
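# Illustrative sketch (not part of the original module): the period-selection rule in
# ``_find_process_group`` reproduced on plain integers, assuming a hypothetical hierarchy
# with periods 4 and 16 as in the docstring example above.
def _demo_period_selection(step, periods=(4, 16)):
    for period in sorted(periods, reverse=True):
        if step % period == 0:
            # The group registered for this period would average parameters at ``step``.
            return period
    return None  # no group averages at this step
# For example, steps 104 and 108 select period 4, step 112 selects period 16,
# and step 110 selects no group at all.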
| pytorch-master | torch/distributed/algorithms/model_averaging/hierarchical_model_averager.py |
import torch
import torch.distributed as dist
from torch import nn
def _quantize_per_tensor_cuda(x, scale, zero_point):
y = torch.round(x / scale) + zero_point
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_tensor_cuda(y, scale, zero_point):
x = scale * (y.to(torch.float32) - zero_point)
return x
def _quantize_per_channel_cuda(x, scale, zero_point):
y = torch.zeros(x.size(), device=x.device)
for i in range(x.size()[0]):
y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_channel_cuda(y, scale, zero_point):
y = y.to(torch.float32).cuda(y.device)
x = torch.zeros_like(y, device=y.device)
for i in range(x.size()[0]):
x[i, :] = scale[i] * (y[i, :] - zero_point[i])
return x
def _get_allgather_out_list(all_gather_in_list, world_size):
out_list = [
torch.zeros_like(
all_gather_in_list,
device=all_gather_in_list.device,
dtype=all_gather_in_list.dtype,
)
for _ in range(world_size)
]
return out_list
def quantization_pertensor_hook(
process_group: dist.ProcessGroup, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
"""
Applies the ``torch.quantize_per_tensor`` logic to DDP using ``allgather``
protocol. Workers first allgather the scale and zero point of their own
``GradBucket`` prior to the quantization. After all workers have that information,
the first ``then`` callback, ``quantize_and_allgather``, quantizes the worker's
own gradient tensor and uses ``allgather`` to communicate these across all workers.
The final ``then`` callback, ``dequantize_and_aggregate``, dequantizes and
aggregates each quantized gradient tensor locally and returns the mean.
.. warning ::
This is experimental, and uses ``allgather`` protocol which is considerably slower than
``allreduce`` protocol. It works only with flattened grads.
Example::
>>> # xdoctest: +SKIP
>>> ddp_model.register_comm_hook(process_group, quantization_pertensor_hook)
"""
group_to_use = process_group if process_group is not None else dist.group.WORLD
rank = process_group.rank() if process_group is not None else dist.get_rank()
world_size = group_to_use.size()
tensor = bucket.buffer()
myObserver = torch.quantization.MinMaxObserver().cuda(tensor.device)
myObserver(tensor)
s, z = myObserver.calculate_qparams()
s_and_z = torch.FloatTensor([s, z]).cuda(tensor.device)
all_ranks_s_and_z = _get_allgather_out_list(s_and_z, world_size)
# First, allgather scale and zeros.
fut = dist.all_gather(
all_ranks_s_and_z, s_and_z, group=group_to_use, async_op=True
).get_future()
def quantize_and_allgather(fut):
# Store scale and zeros across all workers.
all_ranks_s_and_z = fut.wait()[0]
# All workers quantize their own ``GradBucket`` tensors.
quantized_tensor = _quantize_per_tensor_cuda(
tensor, all_ranks_s_and_z[rank][0], all_ranks_s_and_z[rank][1]
)
# Allgather quantized tensors.
fut = dist.all_gather(
_get_allgather_out_list(quantized_tensor, world_size),
quantized_tensor,
group=group_to_use,
async_op=True,
).get_future()
return fut.wait()
def dequantize_and_aggregate(fut):
all_ranks_quantized_tensor = fut.wait()[0]
aggregated_dequantized_tensor = torch.zeros_like(
all_ranks_quantized_tensor[0], device=tensor.device, dtype=torch.float32
)
# Using previously allgathered scales and zeros, dequantize gradient tensors
# locally and then aggregate them.
for r, quantized_tensor in enumerate(all_ranks_quantized_tensor):
aggregated_dequantized_tensor += _dequantize_per_tensor_cuda(
quantized_tensor, all_ranks_s_and_z[r][0], all_ranks_s_and_z[r][1]
)
return aggregated_dequantized_tensor / world_size
return fut.then(quantize_and_allgather).then(dequantize_and_aggregate)
def quantization_perchannel_hook(
process_group: dist.ProcessGroup, bucket: dist.GradBucket, bucket_size=512
) -> torch.futures.Future[torch.Tensor]:
"""
Applies the ``torch.quantize_per_channel`` logic to DDP using ``allgather``
protocol. Compared to per-tensor quantization, the main motivation of per-channel
quantization is that, for considerably large tensors such as one containing 6 million
elements, quantizing per bucket of 512 (or 128) elements may significantly
increase the resolution.
It first splits ``GradBucket`` tensor into multiple chunks (channels) of ``bucket_size``
elements. Then, workers allgather the scales and zero points of their own
``GradBucket`` prior to the quantization. After all workers have that information,
the first ``then`` callback, ``quantize_and_allgather``, quantizes the worker's
own gradient tensor and uses ``allgather`` to communicate these across all workers.
The final ``then`` callback, ``dequantize_and_aggregate``, dequantizes, flattens, and
aggregates each quantized gradient tensor locally and returns the mean.
.. warning ::
This is experimental, and uses ``allgather`` protocol which is considerably slower than
``allreduce`` protocol. It works only with flattened grads.
Example::
>>> # xdoctest: +SKIP
>>> ddp_model.register_comm_hook(process_group, quantization_perchannel_hook)
"""
group_to_use = process_group if process_group is not None else dist.group.WORLD
rank = process_group.rank() if process_group is not None else dist.get_rank()
world_size = group_to_use.size()
tensor = bucket.buffer()
tensor_in_channels = (
nn.functional.pad(
input=tensor,
pad=(0, bucket_size - len(tensor) % bucket_size),
mode="constant",
value=0,
)
.view(-1, bucket_size)
.cuda(tensor.device)
)
myPerChannelObserver = torch.quantization.PerChannelMinMaxObserver().cuda(
tensor.device
)
myPerChannelObserver(tensor_in_channels)
s_ch, z_ch = myPerChannelObserver.calculate_qparams()
s_and_z = torch.stack((s_ch, z_ch)).cuda(tensor.device)
all_ranks_s_and_z = _get_allgather_out_list(s_and_z, world_size)
# First, allgather scale and zeros.
fut = dist.all_gather(
all_ranks_s_and_z, s_and_z, group=group_to_use, async_op=True
).get_future()
def quantize_and_allgather(fut):
# Store scale and zeros across all workers.
all_ranks_s_and_z = fut.wait()[0]
# All workers quantize their corresponding ``GradBucket`` tensors.
quantized_tensor = _quantize_per_channel_cuda(
tensor_in_channels,
all_ranks_s_and_z[rank, 0, :],
all_ranks_s_and_z[rank, 1, :],
)
# Allgather quantized tensors.
fut = dist.all_gather(
_get_allgather_out_list(quantized_tensor, world_size),
quantized_tensor,
group=group_to_use,
async_op=True,
).get_future()
return fut.wait()
def dequantize_and_aggregate(fut):
all_ranks_quantized_tensor = fut.wait()[0]
aggregated_dequantized_tensor = torch.zeros_like(
all_ranks_quantized_tensor[0], device=tensor.device, dtype=torch.float32
)
# Using previously allgathered scales and zeros, dequantize gradient tensors
# locally and then aggregate them.
for r, quantized_tensor in enumerate(all_ranks_quantized_tensor):
aggregated_dequantized_tensor += _dequantize_per_channel_cuda(
quantized_tensor, all_ranks_s_and_z[r][0], all_ranks_s_and_z[r][1]
)
return (
torch.flatten(aggregated_dequantized_tensor).cuda(tensor.device)[
: tensor.size()[0]
]
/ world_size
)
return fut.then(quantize_and_allgather).then(dequantize_and_aggregate)
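# Illustrative sketch (not part of the original module): a CPU round trip through the
# per-tensor quantize/dequantize helpers defined above. Despite the ``_cuda`` suffix,
# the arithmetic in ``_quantize_per_tensor_cuda`` and ``_dequantize_per_tensor_cuda``
# is device-agnostic, so this hypothetical helper only needs a CPU tensor.
def _demo_pertensor_roundtrip():
    torch.manual_seed(0)
    grad = torch.randn(1000)
    observer = torch.quantization.MinMaxObserver()
    observer(grad)
    scale, zero_point = (float(t) for t in observer.calculate_qparams())
    quantized = _quantize_per_tensor_cuda(grad, scale, zero_point)
    dequantized = _dequantize_per_tensor_cuda(quantized, scale, zero_point)
    # The reconstruction error stays within a couple of quantization steps.
    assert (grad - dequantized).abs().max() < 2 * scale
    return dequantized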
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py |
from typing import Any, Callable
import torch
import torch.distributed as dist
_FUNCTIONAL_OPTIM_STEP_METHOD_NAME = "step_param"
class _OptimizerHookState(object):
"""
Holds state for running the optimizer in-line after a DDP communication hook.
Currently it contains only the functional optimizer, which must implement a `step_param` method.
"""
__slots__ = ["functional_optimizer", "params_to_optimize"]
def __init__(self, functional_optim, params=None):
self.functional_optimizer = functional_optim
self._check_valid_functional_optim()
self._set_params_to_optimize(params)
def _set_params_to_optimize(self, params):
if params is not None:
self.params_to_optimize = set(params)
def _check_valid_functional_optim(self):
if not hasattr(self.functional_optimizer, _FUNCTIONAL_OPTIM_STEP_METHOD_NAME):
raise ValueError(
f"Class {type(self.functional_optimizer)} must implement method "
f"{_FUNCTIONAL_OPTIM_STEP_METHOD_NAME}."
)
def _hook_then_optimizer(
hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]],
optimizer_state: _OptimizerHookState,
) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
r"""
Runs optimizer in a functional fashion after DDP communication hook.
"""
has_set_params = (
hasattr(optimizer_state, 'params_to_optimize')
and optimizer_state.params_to_optimize is not None
)
def hook_then_optimizer_wrapper(
hook_state, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
# Run original hook
fut = hook(hook_state, bucket)
def optimizer_step(fut):
gradient_tensors = bucket.gradients()
model_params = bucket.parameters()
for grad_tensor, model_param in zip(gradient_tensors, model_params):
if not has_set_params or model_param in optimizer_state.params_to_optimize:
optimizer_state.functional_optimizer.step_param(
model_param,
grad_tensor,
)
return bucket.buffer()
return fut.then(optimizer_step)
return hook_then_optimizer_wrapper
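# Illustrative sketch (not part of the original module): a minimal functional optimizer
# exposing the ``step_param`` method that ``_OptimizerHookState`` validates. It applies
# plain SGD to one parameter at a time; the class name and learning rate are hypothetical.
class _ToySGDWithStepParam(object):
    def __init__(self, lr: float = 0.1):
        self.lr = lr

    def step_param(self, param: torch.nn.Parameter, grad: torch.Tensor) -> None:
        # ``None`` means "do not update this parameter", mirroring the hook contract above.
        if grad is None:
            return
        with torch.no_grad():
            param.add_(grad, alpha=-self.lr)

# ``_OptimizerHookState(_ToySGDWithStepParam())`` passes the ``step_param`` check and can
# then be combined with a communication hook via ``_hook_then_optimizer``.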
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py |
import logging
import torch
import torch.distributed as dist
from . import default_hooks as default
logger = logging.getLogger(__name__)
class PostLocalSGDState(object):
r"""
Stores the state for all-reducing gradients globally using ``process_group`` until step ``start_localSGD_iter``,
and all-reducing gradients locally using ``subgroup`` afterwards.
If ``process_group`` is ``None``, the global process group will be used.
If ``subgroup`` is ``None``, the intra-node process group on each machine will be used.
Additionally, ``post_local_gradient_allreduce`` may be worth tuning,
because either ``True`` or ``False`` may give faster convergence depending on the workload.
"""
__slots__ = [
"process_group",
"subgroup",
"start_localSGD_iter",
"post_local_gradient_allreduce",
"iter",
]
def __init__(
self,
process_group,
subgroup,
start_localSGD_iter,
post_local_gradient_allreduce=True,
):
logger.info(
"Local SGD will be started after {} iterations".format(start_localSGD_iter)
)
# The group used for all-reducing gradients globally.
self.process_group = process_group
# The group used for all-reducing gradients locally.
self.subgroup = subgroup
self.start_localSGD_iter = start_localSGD_iter
# Allreduce gradients locally since iteration `start_localSGD_iter`.
# This may help with the convergence efficiency at the cost of relatively cheap intra-subgroup communication.
self.post_local_gradient_allreduce = post_local_gradient_allreduce
# Iteration/step in the training loop.
self.iter = 0
def maybe_increase_iter(self, bucket):
# Since bucket 0 is the last bucket to allreduce in an iteration,
# only increase `iter` when bucket 0 is processed.
if bucket.is_last():
self.iter += 1
if self.iter == self.start_localSGD_iter:
logger.info(
"Start to apply local SGD after {} iterations.".format(self.iter)
)
def post_localSGD_hook(
state: PostLocalSGDState, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
"""
This DDP communication hook is used for running post-localSGD algorithm,
by combining with a model averaging component (e.g.,
:class:`~torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager`)
that runs after the optimizer step.
Args:
state (PostLocalSGDState): State information to run post-localSGD.
Users mainly need to tune ``start_localSGD_iter`` to determine when to start local SGD.
bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
Note that since DDP comm hook only supports single process single device mode,
only exactly one tensor is stored in this bucket.
Returns:
Future handler of the communication, which updates the gradients in place.
Example::
>>> # xdoctest: +SKIP
>>> state = PostLocalSGDState(process_group=process_group, subgroup=subgroup,
start_localSGD_iter=10)
>>> ddp_model.register_comm_hook(state, post_localSGD_hook)
>>> # Also need to establish a model averaging module and run model averaging after ``optimizer.step()``.
>>> # Please refer to the examples in ``torch.distributed.algorithms.model_averaging.averagers`` module.
"""
global_group_to_use = (
state.process_group if state.process_group is not None else dist.group.WORLD
)
# The input tensor is a flattened 1D tensor.
input_tensor = bucket.buffer()
# Run allreduce using `global_group_to_use` in the first `start_localSGD_iter` iterations.
if state.iter < state.start_localSGD_iter:
state.maybe_increase_iter(bucket)
return default._allreduce_fut(global_group_to_use, input_tensor)
# If `post_local_gradient_allreduce` is not set,
# then no gradient synchronization after the first `start_localSGD_iter` iterations.
if not state.post_local_gradient_allreduce:
fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()
fut.set_result(input_tensor)
return fut
# Run allreduce using `subgroup` after the first `start_localSGD_iter` iterations.
# Note that by default, a separate subgroup for each node is created which
# causes an intra-node allreduce to be done at each training step.
# From this moment, model averaging should run after the optimizer step,
# to globally allreduce all the parameters.
if state.subgroup is None:
state.subgroup, _ = dist.new_subgroups()
return default._allreduce_fut(state.subgroup, input_tensor)
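# Illustrative sketch (not part of the original module): the iteration gating of
# ``post_localSGD_hook`` reproduced without a process group, showing which kind of
# gradient synchronization each training step would perform. Hypothetical helper.
def _demo_post_localSGD_schedule(start_localSGD_iter=100, post_local_gradient_allreduce=True, total_steps=104):
    schedule = []
    for step in range(total_steps):
        if step < start_localSGD_iter:
            schedule.append((step, "global allreduce"))
        elif post_local_gradient_allreduce:
            schedule.append((step, "intra-subgroup allreduce"))
        else:
            schedule.append((step, "no gradient sync (model averaging only)"))
    return schedule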
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py |
from enum import Enum
from functools import partial
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from . import (
debugging_hooks as debugging,
default_hooks as default,
powerSGD_hook as powerSGD,
quantization_hooks as quantization,
optimizer_overlap_hooks as optimizer_overlap,
)
__all__ = ['DDPCommHookType', 'register_ddp_comm_hook']
def _ddp_comm_hook_wrapper(comm_hook, model, state):
model.register_comm_hook(state, comm_hook)
def _powerSGD_comm_hook_wrapper(
comm_hook,
model,
state,
matrix_approximation_rank,
start_powerSGD_iter=1_000,
):
"""
To be consistent with the wrappers of other DDP comm hooks, the input state only needs to be a process group,
which will be wrapped up with other state info.
"""
powerSGD_state = powerSGD.PowerSGDState(
process_group=state,
matrix_approximation_rank=matrix_approximation_rank,
start_powerSGD_iter=start_powerSGD_iter,
)
model.register_comm_hook(powerSGD_state, comm_hook)
class DDPCommHookType(Enum):
"""
DDPCommHookType enumerates the hooks of ``torch.distributed.algorithms.ddp_comm_hooks``
as names and as ``ddp_comm_hook_wrapper`` partials with the hook specified. As an example,
you can register the allreduce hook with
``DDPCommHookType.ALLREDUCE.value(model=model, state=process_group)``.
"""
ALLREDUCE = partial(_ddp_comm_hook_wrapper, comm_hook=default.allreduce_hook)
FP16_COMPRESS = partial(
_ddp_comm_hook_wrapper, comm_hook=default.fp16_compress_hook
)
BF16_COMPRESS = partial(
_ddp_comm_hook_wrapper, comm_hook=default.bf16_compress_hook
)
QUANTIZE_PER_TENSOR = partial(
_ddp_comm_hook_wrapper, comm_hook=quantization.quantization_pertensor_hook
)
QUANTIZE_PER_CHANNEL = partial(
_ddp_comm_hook_wrapper, comm_hook=quantization.quantization_perchannel_hook
)
POWER_SGD = partial(
_powerSGD_comm_hook_wrapper,
comm_hook=powerSGD.powerSGD_hook,
matrix_approximation_rank=1,
)
# Rank-2 PowerSGD can give a higher accuracy than the default rank-1 version,
# but it runs slower and consumes more memory.
POWER_SGD_RANK2 = partial(
_powerSGD_comm_hook_wrapper,
comm_hook=powerSGD.powerSGD_hook,
matrix_approximation_rank=2,
)
# Batching can lead to faster training at the cost of accuracy.
BATCHED_POWER_SGD = partial(
_powerSGD_comm_hook_wrapper,
comm_hook=powerSGD.batched_powerSGD_hook,
matrix_approximation_rank=1,
)
BATCHED_POWER_SGD_RANK2 = partial(
_powerSGD_comm_hook_wrapper,
comm_hook=powerSGD.batched_powerSGD_hook,
matrix_approximation_rank=2,
)
NOOP = partial(
_ddp_comm_hook_wrapper, comm_hook=debugging.noop_hook,
)
def register_ddp_comm_hook(
comm_hook_type: DDPCommHookType, model: DistributedDataParallel, state=None
):
"""
Registers the hooks of ``torch.distributed.algorithms.ddp_comm_hooks``
to the DDP model. A user can specify the type of hook as an enum
``DDPCommHookType`` using the ``comm_hook_type`` input. The ``state`` input will
be passed to the model.
Uses Python comm hook implementations.
Example::
>>> # xdoctest: +SKIP
>>> register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, model, state)
"""
comm_hook_type.value(model=model, state=state)
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/__init__.py |
from typing import Any
import torch
from torch.distributed import GradBucket
def noop_hook(_: Any, bucket: GradBucket) -> torch.futures.Future[torch.Tensor]:
"""
This DDP communication hook returns a future that wraps the input,
so it is a noop that does not incur any communication overheads.
This hook should **only** be used for headroom analysis of allreduce optimization,
instead of the normal gradient synchronization.
For example, if less than a 10% speedup in training time is observed after this hook is registered,
it usually implies that allreduce is not a performance bottleneck for this case.
Such instrumentation can be particularly useful
if GPU traces cannot be easily retrieved or the trace analysis is complicated
by factors such as the overlap between allreduce and computation or the desynchronization across ranks.
Example::
>>> # xdoctest: +SKIP
>>> ddp_model.register_comm_hook(None, noop_hook)
"""
fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()
fut.set_result(bucket.buffer())
return fut
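# Illustrative sketch (not part of the original module): the already-completed future
# pattern used by ``noop_hook``, exercised on a plain tensor that stands in for
# ``bucket.buffer()``. Hypothetical helper; no DDP or process group required.
def _demo_completed_future():
    buffer = torch.ones(4)
    fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()
    fut.set_result(buffer)
    # ``wait()`` returns immediately because the result was set up front, which is
    # exactly why this hook adds no communication overhead.
    assert torch.equal(fut.wait(), buffer)
    return fut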
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py |
import weakref
from typing import Any, Callable, List, Optional
import torch
import torch.distributed as dist
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import (
_get_global_rank,
_OverlapStatus,
)
from torch.nn.parallel.distributed import DistributedDataParallel
# Functional optimizers require passing a list of gradients to their `step()`
# method, and ZeRO requires a functional optimizer to overlap with DDP.
# Passing a `None` instead of an actual gradient indicates to the optimizer
# to not update the corresponding parameter
_NO_PARAM_UPDATE = None
def _perform_local_step(
bucket: dist.GradBucket,
zero: ZeroRedundancyOptimizer,
rank: int,
):
r"""
Performs a local optimizer step using the gradients provided by ``bucket``.
Arguments:
bucket (dist.GradBucket): the bucket providing the gradients.
zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
instance to perform the :meth:`_local_step`.
rank (int): the calling process's rank.
.. warning::
This function assumes that appropriate synchronization has taken place
so that the bucket's gradients can be used.
"""
overlap_info = zero._overlap_info
bucket_index = bucket.index()
assert len(zero.optim.param_groups) == 1, \
"Overlapping DDP with ZeRO only supports a single parameter group"
# Construct the `gradients` input for the local optimizer step, which
# expects `None` in a list position to indicate that the corresponding
# parameter should not be updated
num_local_optim_params = len(zero.optim.param_groups[0]["params"])
gradients: List[Optional[torch.Tensor]] = \
[_NO_PARAM_UPDATE for _ in range(num_local_optim_params)]
assert bucket_index in overlap_info.offsets, \
f"Bucket index {bucket_index} was not assigned to rank {rank}"
gradients_offset = overlap_info.offsets[bucket_index]
bucket_assignment = zero._bucket_assignments_per_rank[rank][bucket_index]
bucket_offset = bucket_assignment.offset
length = len(bucket_assignment.parameters)
bucket_gradients = bucket.gradients()[bucket_offset:bucket_offset + length]
for i, grad in enumerate(bucket_gradients):
gradients[gradients_offset + i] = grad
zero._local_step(gradients)
def _broadcast_bucket(
bucket_index: int,
zero: ZeroRedundancyOptimizer,
):
r"""
Broadcasts a bucket's parameters.
Arguments:
bucket_index (int): the index of the bucket corresponding to the
parameters to broadcast.
zero (ZeroRedundancyOptimizer): the calling process's
:class:`ZeroRedundancyOptimizer` instance.
"""
overlap_info = zero._overlap_info
assert len(overlap_info.assigned_ranks_per_bucket) > bucket_index, \
"`assigned_ranks_per_bucket` is not fully constructed"
# Sort to ensure the same ordering across ranks
assigned_ranks = sorted(overlap_info.assigned_ranks_per_bucket[bucket_index])
assert len(assigned_ranks) > 0, f"Bucket {bucket_index} should be " \
"assigned to at least one rank"
for assigned_rank in assigned_ranks:
bucket_assignments = zero._bucket_assignments_per_rank[assigned_rank]
if bucket_index in bucket_assignments:
overlap_info.broadcast_handles.append(
dist.broadcast(
bucket_assignments[bucket_index].tensor,
src=_get_global_rank(zero.process_group, assigned_rank),
group=zero.process_group,
async_op=True,
)
)
def _save_ddp_bucket_info(
bucket: dist.GradBucket,
zero: ZeroRedundancyOptimizer,
):
r"""
Saves :class:`DistributedDataParallel` gradient bucket information for the
:class:`ZeroRedundancyOptimizer` instance ``zero`` to use when overlapping.
In particular, this function is meant to be called upon seeing each
gradient bucket, meaning it does not save or compute any global
information.
Arguments:
bucket (dist.GradBucket): the current gradient bucket.
zero (ZeroRedundancyOptimizer): the calling process's
:class:`ZeroRedundancyOptimizer` instance.
"""
overlap_info = zero._overlap_info
bucket_params = bucket.parameters()
assert len(bucket_params) > 0, "Empty bucket"
# Save the parameters in the bucket
overlap_info.params_per_bucket.append(bucket_params)
if overlap_info.shard_buckets:
# Additionally save the bucket size for the assignment heuristic to use
bucket_size = 0
for param in bucket_params:
bucket_size += param.numel()
assert overlap_info.total_size is not None
overlap_info.total_size += bucket_size
def _hook_with_zero_step_setup(
ddp_ref: weakref.ReferenceType,
zero: ZeroRedundancyOptimizer,
bucket: dist.GradBucket,
):
r"""
Encapsulates the setup logic for :func:`hook_with_zero_step` and
:func:`hook_with_zero_step_interleaved`, meaning the logic to run in the
hook before the backward pass and optimizer step can actually be
overlapped. This is factored out since it is common to both
:func:`hook_with_zero_step` and :func:`hook_with_zero_step_interleaved`.
Arguments:
ddp_ref (weakref.ReferenceType): weak reference to the process's
:class:`DistributedDataParallel` instance.
zero (ZeroRedundancyOptimizer): the calling process's
:class:`ZeroRedundancyOptimizer` instance.
bucket (dist.GradBucket): the current gradient bucket.
"""
# Proceed as normal until the DDP buckets have been rebuilt
if not ddp_ref()._has_rebuilt_buckets: # type: ignore[union-attr]
assert zero._overlap_info.status == _OverlapStatus.UNINITIALIZED
return
bucket_index = bucket.index()
overlap_info = zero._overlap_info
if overlap_info.status == _OverlapStatus.UNINITIALIZED:
overlap_info.status = _OverlapStatus.DDP_HAS_REBUILT_BUCKETS
if overlap_info.status == _OverlapStatus.DDP_HAS_REBUILT_BUCKETS:
if bucket_index == 0 and len(overlap_info.params_per_bucket) > 0:
# This corresponds to the first bucket of the backward pass
# immediately after all information has been saved, so we
# can perform the delayed ZeRO initialization
zero._init_zero_for_overlap()
else:
# Once DDP buckets have been rebuilt but ZeRO has not been
# properly initialized yet, save the information needed
_save_ddp_bucket_info(bucket, zero)
def hook_with_zero_step(
hook: Callable[[Any, dist.GradBucket], torch.futures.Future],
ddp: DistributedDataParallel,
zero: ZeroRedundancyOptimizer,
shard_buckets: bool = False,
) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
r"""
Modifies the given ``hook`` to overlap the :class:`ZeroRedundancyOptimizer`
optimizer step with the :class:`DistributedDataParallel` backward pass,
where the optimizer step computation begins after the last gradient bucket
computation has finished.
This approach overlaps the optimizer computation and communication with the
backward communication. In particular, the backward computation proceeds
contiguously, and the optimizer computation follows, overlapping with
outstanding backward communication (i.e. all-reduces) and possibly other
optimizer communication (i.e. broadcasts).
This approach may be preferred over :meth:`hook_with_zero_step_interleaved`
if communication is relatively slow compared to computation.
Arguments:
hook (Callable[[Any, dist.GradBucket], torch.futures.Future]): the hook
to modify.
ddp (DistributedDataParallel): the :class:`DistributedDataParallel`
instance to use.
zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
instance to use.
shard_buckets (bool): if ``True``, then the assignment of each
:class:`DistributedDataParallel` bucket is partitioned across
possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e.
across possibly multiple ranks) to approximate uniformity; if
``False``, then each bucket is wholly assigned to a single
:class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank).
Returns:
The modified hook.
Raises:
ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``.
RuntimeError: if using any backend other than NCCL/HCCL since currently
Gloo may hang.
.. warning::
Given the way that overlapping :class:`DistributedDataParallel` with
:class:`ZeroRedundancyOptimizer` is currently implemented, the first
two or three training iterations do not perform parameter updates in
the optimizer step, depending on if ``static_graph=False`` or
``static_graph=True``, respectively. This is because it needs
information about the gradient bucketing strategy used by
:class:`DistributedDataParallel`, which is not finalized until the
second forward pass if ``static_graph=False`` or until the third
forward pass if ``static_graph=True``.
"""
if not zero._overlap_with_ddp:
raise ValueError(
"ZeroRedundancyOptimizer must be constructed with "
"`overlap_with_ddp=True` to use this hook properly"
)
ddp_ref = weakref.ref(ddp)
# NOTE: Gloo may hang with this overlapping approach, so we require
# NCCL/HCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300
pg = dist.get_backend(ddp_ref().process_group) # type: ignore[union-attr]
if ((pg != dist.Backend.NCCL) and (pg != 'hccl')):
raise RuntimeError(
"Overlapping DDP with ZeRO using this approach currently requires "
"NCCL/HCCL backend to avoid hangs"
)
if shard_buckets:
zero._overlap_info.shard_buckets = True
zero._overlap_info.total_size = 0
def hook_with_zero_fn(
state: Any,
bucket: dist.GradBucket,
) -> torch.futures.Future[torch.Tensor]:
r"""
Returns a :class:`Future` that gives a gradient bucket tensor and
performs the equivalent of a :class:`ZeroRedundancyOptimizer`
:meth:`step` if ``bucket`` is the last gradient bucket.
The function performs additional computation on the iteration that
the :class:`DistributedDataParallel` buckets are rebuilt to collect
information used to implement the modified hook.
Arguments:
state (Any): any state for the hook.
bucket (dist.GradBucket): the :class:`DistributedDataParallel`
gradient bucket.
"""
fut = hook(state, bucket)
_hook_with_zero_step_setup(ddp_ref, zero, bucket)
if zero._overlap_info.status != _OverlapStatus.INITIALIZED:
return fut
overlap_info = zero._overlap_info
bucket_index = bucket.index()
rank = zero.global_rank
assert overlap_info.status == _OverlapStatus.INITIALIZED
assert len(overlap_info.assigned_ranks_per_bucket) > bucket_index, \
"`assigned_ranks_per_bucket` is not fully constructed"
assigned_to_bucket = rank in overlap_info.assigned_ranks_per_bucket[bucket_index]
# Save the bucket reference and all-reduce future for the final bucket
if assigned_to_bucket:
overlap_info.bucket_index_to_bucket[bucket_index] = bucket
overlap_info.bucket_index_to_future[bucket_index] = fut
# Check that buckets are indexed incrementally starting from 0 in the
# order of their autograd hooks firing
if len(overlap_info.bucket_indices_seen) > 0:
assert overlap_info.bucket_indices_seen[-1] == bucket_index - 1, \
"Bucket indices are not in incremental order"
else:
assert bucket_index == 0, "Bucket indices do not start from 0"
overlap_info.bucket_indices_seen.append(bucket_index)
# Directly return the future without any optimizer computation if this
# is not the last bucket
num_buckets = len(overlap_info.params_per_bucket)
is_last_bucket = bucket_index == num_buckets - 1
if not is_last_bucket:
return fut
# Perform partial optimizer step on all buckets after the final
# bucket has been computed
# NOTE: This should not be chained as a callback to the last bucket's
# all-reduce future since that would add synchronization that delays
# all optimizer computation to wait for that last all-reduce
for bucket_index in range(num_buckets):
assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index]
if rank in assigned_ranks:
# Wait on the bucket's all-reduce future to ensure correct
# gradients
assert bucket_index in overlap_info.bucket_index_to_future, \
f"All-reduce future for bucket {bucket_index} not saved " \
f"on rank {rank}"
allreduce_future = overlap_info.bucket_index_to_future[bucket_index]
allreduce_future.wait()
# Perform the partial optimizer step
curr_bucket = overlap_info.bucket_index_to_bucket[bucket_index]
_perform_local_step(curr_bucket, zero, rank)
_broadcast_bucket(bucket_index, zero)
# Ensure that all parameter updates are finished before the
# next forward pass
overlap_info.wait_for_broadcasts()
overlap_info.clear_per_iter_info()
return fut
return hook_with_zero_fn
def hook_with_zero_step_interleaved(
hook: Callable[[Any, dist.GradBucket], torch.futures.Future],
ddp: DistributedDataParallel,
zero: ZeroRedundancyOptimizer,
shard_buckets: bool = False,
) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
r"""
Modifies the given ``hook`` to overlap the :class:`ZeroRedundancyOptimizer`
optimizer step with the :class:`DistributedDataParallel` backward pass,
where the optimizer step computation interleaves with the backward
computation.
This approach overlaps the optimizer computation and communication with the
backward computation and communication. In particular, once a bucket's
gradients have been computed, the optimizer computation using those
gradients is launched (though the actual computation must wait for the
bucket's all-reduce to complete). This yields an interleaving of all-
reduces and broadcasts in the communication stream.
This approach may be preferred over :meth:`hook_with_zero_step` if
communication is relatively fast compared to computation.
Arguments:
hook (Callable[[Any, dist.GradBucket], torch.futures.Future]): the hook to
modify.
ddp (DistributedDataParallel): the :class:`DistributedDataParallel`
instance to use.
zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
instance to use.
shard_buckets (bool): if ``True``, then the assignment of each
:class:`DistributedDataParallel` bucket is partitioned across
possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e.
across possibly multiple ranks) to approximate uniformity; if
``False``, then each bucket is wholly assigned to a single
:class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank).
Returns:
The modified hook.
Raises:
ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``.
RuntimeError: if using any backend other than NCCL/HCCL since currently
Gloo may hang.
.. warning::
Given the way that overlapping :class:`DistributedDataParallel` with
:class:`ZeroRedundancyOptimizer` is currently implemented, the first
two or three training iterations do not perform parameter updates in
the optimizer step, depending on if ``static_graph=False`` or
``static_graph=True``, respectively. This is because it needs
information about the gradient bucketing strategy used by
:class:`DistributedDataParallel`, which is not finalized until the
second forward pass if ``static_graph=False`` or until the third
forward pass if ``static_graph=True``.
"""
if not zero._overlap_with_ddp:
raise ValueError(
"ZeroRedundancyOptimizer must be constructed with "
"`overlap_with_ddp=True` to use this hook properly"
)
ddp_ref = weakref.ref(ddp)
# NOTE: Gloo may hang with this overlapping approach, so we require
# NCCL/HCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300
pg = dist.get_backend(ddp_ref().process_group) # type: ignore[union-attr]
if ((pg != dist.Backend.NCCL) and (pg != 'hccl')):
raise RuntimeError(
"Overlapping DDP with ZeRO using this approach currently requires "
"NCCL/HCCL backend to avoid hangs"
)
if shard_buckets:
zero._overlap_info.shard_buckets = True
zero._overlap_info.total_size = 0
def hook_with_zero_interleaved_fn(
state,
bucket: dist.GradBucket,
) -> torch.futures.Future[torch.Tensor]:
r"""
Returns a :class:`Future` that gives a gradient bucket tensor and
performs a partial :class:`ZeroRedundancyOptimizer` :meth:`step` using
the gradients in that bucket.
Arguments:
state: any state for the hook.
bucket (dist.GradBucket): the :class:`DistributedDataParallel`
gradient bucket.
"""
fut = hook(state, bucket)
_hook_with_zero_step_setup(ddp_ref, zero, bucket)
if zero._overlap_info.status != _OverlapStatus.INITIALIZED:
return fut
def zero_step(fut: torch.futures.Future) -> torch.Tensor:
r"""
Performs a partial :class:`ZeroRedundancyOptimizer` :meth:`step`
using the gradients in the given :class:`DistributedDataParallel`
gradient bucket.
Returns:
A :class:`torch.Tensor` representing the contents of the
gradient bucket.
"""
overlap_info = zero._overlap_info
bucket_index = bucket.index()
rank = zero.global_rank
assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index]
overlap_info.bucket_indices_seen.append(bucket_index)
if rank in assigned_ranks:
_perform_local_step(bucket, zero, rank)
_broadcast_bucket(bucket_index, zero)
num_buckets = len(overlap_info.params_per_bucket)
if len(overlap_info.bucket_indices_seen) == num_buckets:
# Ensure that all parameter updates are finished before the
# next forward pass
overlap_info.wait_for_broadcasts()
overlap_info.clear_per_iter_info()
return bucket.buffer()
return fut.then(zero_step)
return hook_with_zero_interleaved_fn
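# Illustrative sketch (not part of the original module): the ``fut.then(...)`` chaining
# used above, reduced to plain futures with no process group. The "allreduce" is a
# pre-completed future and the "shard" owned by this rank is just the first parameter;
# all names here are hypothetical.
def _demo_chained_local_step():
    params = [torch.nn.Parameter(torch.ones(3)), torch.nn.Parameter(torch.ones(3))]
    averaged_grads = torch.full((6,), 0.5)  # stands in for the allreduced bucket buffer

    fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()
    fut.set_result(averaged_grads)

    def local_step(completed_fut: torch.futures.Future) -> torch.Tensor:
        buffer = completed_fut.value()
        # This rank is "assigned" only the first parameter of the bucket, so it applies
        # a plain SGD update to that slice and leaves the rest untouched.
        with torch.no_grad():
            params[0].add_(buffer[:3].view_as(params[0]), alpha=-0.1)
        return buffer

    # A broadcast of the updated shard to the other ranks would follow here.
    return fut.then(local_step)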
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py |
from typing import Any, Callable
import torch
import torch.distributed as dist
def _allreduce_fut(
process_group: dist.ProcessGroup, tensor: torch.Tensor
) -> torch.futures.Future[torch.Tensor]:
"Averages the input gradient tensor by allreduce and returns a future."
group_to_use = process_group if process_group is not None else dist.group.WORLD
# Apply the division first to avoid overflow, especially for FP16.
tensor.div_(group_to_use.size())
return (
dist.all_reduce(tensor, group=group_to_use, async_op=True)
.get_future()
.then(lambda fut: fut.value()[0])
)
def allreduce_hook(
process_group: dist.ProcessGroup, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
"""
This DDP communication hook just calls ``allreduce`` using ``GradBucket``
tensors. Once gradient tensors are aggregated across all workers, its ``then``
callback takes the mean and returns the result. If a user registers this hook,
the DDP result is expected to be the same as the case where no hook was registered.
Hence, this won't change the behavior of DDP, and a user can use this as a reference
or modify this hook to log useful information or for any other purposes, without
affecting DDP behavior.
Example::
>>> # xdoctest: +SKIP
>>> ddp_model.register_comm_hook(process_group, allreduce_hook)
"""
return _allreduce_fut(process_group, bucket.buffer())
def fp16_compress_hook(
process_group: dist.ProcessGroup, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
"""
This DDP communication hook implements a simple gradient compression
approach that casts ``GradBucket`` tensor to half-precision floating-point format (``torch.float16``)
and then divides it by the process group size.
It allreduces those ``float16`` gradient tensors. Once compressed gradient
tensors are allreduced, the chained callback ``decompress`` casts them back to the input data type (such as ``float32``).
Example::
>>> # xdoctest: +SKIP
>>> ddp_model.register_comm_hook(process_group, fp16_compress_hook)
"""
group_to_use = process_group if process_group is not None else dist.group.WORLD
world_size = group_to_use.size()
compressed_tensor = bucket.buffer().to(torch.float16).div_(world_size)
fut = dist.all_reduce(
compressed_tensor, group=group_to_use, async_op=True
).get_future()
def decompress(fut):
decompressed_tensor = bucket.buffer()
# Decompress in place to reduce the peak memory.
# See: https://github.com/pytorch/pytorch/issues/45968
decompressed_tensor.copy_(fut.value()[0])
return decompressed_tensor
return fut.then(decompress)
# TODO: create an internal helper function and extract the duplicate code in FP16_compress and BF16_compress.
def bf16_compress_hook(
process_group: dist.ProcessGroup, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
"""
Warning: This API is experimental, and it requires NCCL version later than 2.9.6.
This DDP communication hook implements a simple gradient compression
approach that casts ``GradBucket`` tensor to half-precision
`Brain floating point format <https://en.wikipedia.org/wiki/Bfloat16_floating-point_format>`_ (``torch.bfloat16``)
and then divides it by the process group size.
It allreduces those ``bfloat16`` gradient tensors. Once compressed gradient
tensors are allreduced, the chained callback ``decompress`` casts it back to the input data type (such as ``float32``).
Example::
>>> # xdoctest: +SKIP
>>> ddp_model.register_comm_hook(process_group, bf16_compress_hook)
"""
group_to_use = process_group if process_group is not None else dist.group.WORLD
world_size = group_to_use.size()
compressed_tensor = bucket.buffer().to(torch.bfloat16).div_(world_size)
fut = dist.all_reduce(
compressed_tensor, group=group_to_use, async_op=True
).get_future()
def decompress(fut):
decompressed_tensor = bucket.buffer()
# Decompress in place to reduce the peak memory.
# See: https://github.com/pytorch/pytorch/issues/45968
decompressed_tensor.copy_(fut.value()[0])
return decompressed_tensor
return fut.then(decompress)
def fp16_compress_wrapper(
hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]
) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
"""
This wrapper casts the input gradient tensor of a given DDP communication hook to half-precision
floating point format (``torch.float16``), and casts the resulting tensor of the given hook back to
the input data type, such as ``float32``.
Therefore, ``fp16_compress_hook`` is equivalent to ``fp16_compress_wrapper(allreduce_hook)``.
Example::
>>> # xdoctest: +SKIP
>>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10)
>>> ddp_model.register_comm_hook(state, fp16_compress_wrapper(powerSGD_hook))
"""
def fp16_compress_wrapper_hook(
hook_state, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
# Cast bucket tensor to FP16.
bucket.set_buffer(bucket.buffer().to(torch.float16))
fut = hook(hook_state, bucket)
def decompress(fut):
decompressed_tensor = bucket.buffer()
# Decompress in place to reduce the peak memory.
# See: https://github.com/pytorch/pytorch/issues/45968
decompressed_tensor.copy_(fut.value())
return decompressed_tensor
# Decompress after hook has run.
return fut.then(decompress)
return fp16_compress_wrapper_hook
def bf16_compress_wrapper(
hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]
) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
"""
Warning: This API is experimental, and it requires NCCL version later than 2.9.6.
This wrapper casts the input gradient tensor of a given DDP communication hook to half-precision
    `Brain floating point format <https://en.wikipedia.org/wiki/Bfloat16_floating-point_format>`_ (``torch.bfloat16``),
and casts the resulting tensor of the given hook back to the input data type, such as ``float32``.
Therefore, ``bf16_compress_hook`` is equivalent to ``bf16_compress_wrapper(allreduce_hook)``.
Example::
>>> # xdoctest: +SKIP
>>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10)
>>> ddp_model.register_comm_hook(state, bf16_compress_wrapper(powerSGD_hook))
"""
def bf16_compress_wrapper_hook(
hook_state, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
# Cast bucket tensor to BF16.
bucket.set_buffer(bucket.buffer().to(torch.bfloat16))
fut = hook(hook_state, bucket)
def decompress(fut):
decompressed_tensor = bucket.buffer()
# Decompress in place to reduce the peak memory.
# See: https://github.com/pytorch/pytorch/issues/45968
decompressed_tensor.copy_(fut.value())
return decompressed_tensor
# Decompress after hook has run.
return fut.then(decompress)
return bf16_compress_wrapper_hook
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py |
from collections import defaultdict
import logging
import math
from typing import Dict
import numpy as np
import torch
import torch.distributed as dist
from . import default_hooks as default
from torch.distributed import distributed_c10d
__all__ = [
"PowerSGDState", "powerSGD_hook", "batched_powerSGD_hook"
]
logger = logging.getLogger(__name__)
def _orthogonalize(matrices, epsilon=0):
"""
    Decides between the Gram-Schmidt procedure and QR factorization to orthogonalize a batch of matrices.
    QR factorization doesn't work with half-precision, but it is usually faster for a rank greater than 2.
"""
assert len(matrices.shape) == 3 and matrices.shape[2] <= matrices.shape[1]
num_matrices = matrices.shape[0]
rank = matrices.shape[2]
dtype = matrices.dtype
if rank <= 2 or dtype in [torch.float16, torch.bfloat16]:
_orthogonalize_gram_schmidt(matrices, epsilon=epsilon)
else:
torch.linalg.qr(
matrices,
out=(
matrices,
torch.empty(num_matrices, rank, rank, device=matrices.device, dtype=dtype)
)
)
def _orthogonalize_gram_schmidt(matrices, epsilon=0):
"""
    Applies the Gram-Schmidt procedure to orthogonalize a batch of matrices.
    If epsilon is 0, this is equivalent to `torch.qr(matrices, out=(matrices, _))`.
"""
num_cols = matrices.shape[2]
for i in range(num_cols):
# Normalize the i'th column.
col = matrices[:, :, i : i + 1]
# If no epsilon is added here, division by zero may be caused by vanishing gradients.
# This epsilon is not needed if the input batch of matrices covers the gradients of at least one entire layer
# in the neural network.
if epsilon == 0:
# Note that col ** 2 can underflow/overflow if we use FP16.
# May need to consider multiplying a scaling factor and dividing it later, or using bfloat16 instead.
try:
col /= torch.norm(col, dim=1, keepdim=True)
except ZeroDivisionError:
logger.error(
"The matrices to be orthogonalized has at least a column of all 0s. Please set a small value such as 1e-8 "
"as `orthogonalization_epsilon` in PowerSGD state."
)
# Recover the values from NaNs to 0s.
col.fill_(0.0)
else:
col /= torch.norm(col, dim=1, keepdim=True) + epsilon
# Project it on the rest and remove it.
if i + 1 < num_cols:
rest = matrices[:, :, i + 1 :]
rest -= torch.sum(col * rest, dim=1, keepdim=True) * col
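# Added illustration (not part of the original source): after the call, the columns of
# each matrix in the batch are orthonormal up to numerical error, e.g.
#     m = torch.randn(1, 8, 2)
#     _orthogonalize_gram_schmidt(m)
#     torch.allclose(m[0].t() @ m[0], torch.eye(2), atol=1e-5)  # expected to be True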
def _should_compress(
num_rows, num_cols, matrix_approximation_rank, min_compression_rate
):
"""
Returns a recommendation as to whether the 2D tensor described by the arguments is worth compressing,
including statistics describing the expected savings from compression. We consider a tensor worth
compressing when ``min_compression_rate`` < uncompressed size / compressed size, where
uncompressed size = ``num_rows`` * ``num_cols``,
and compressed size = (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``.
The result of this function is a tuple of the form (compression_recommendation, uncompressed_el_count, compressed_el_count), where:
    compression_recommendation is true if the tensor is worth compressing, and false otherwise (see above);
    uncompressed_el_count is the uncompressed element count, i.e. ``num_rows`` * ``num_cols``; and,
    compressed_el_count is the element count after compression, i.e. (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``.
""" # noqa: B950
uncompressed_size = num_rows * num_cols
compressed_size = (num_rows + num_cols) * matrix_approximation_rank
return (
compressed_size * min_compression_rate < uncompressed_size,
uncompressed_size,
compressed_size,
)
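# Added worked example (not part of the original source): for a 1024 x 1024 gradient with
# matrix_approximation_rank=1 and min_compression_rate=2, the uncompressed size is
# 1024 * 1024 = 1,048,576 elements and the compressed size is (1024 + 1024) * 1 = 2,048
# elements; since 2,048 * 2 < 1,048,576, compression is recommended.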
def _report_compression_stats(bucket, state):
"""
Report compression stats at the frequency of `compression_stats_logging_frequency` specified in PowerSGD state.
"""
if (
bucket.is_last()
and state.iter >= state.next_stats_report
):
stats = state.compression_stats()
logger.info(
"Compression stats: iter {}, total before compression {}, total after compression {}, "
"rate {}".format(state.iter, stats[1], stats[2], stats[0])
)
state.next_stats_report = state.iter + state.compression_stats_logging_frequency
class PowerSGDState(object):
r"""
Stores both the algorithm's hyperparameters and the internal state for all the gradients during the training.
Particularly, ``matrix_approximation_rank`` and ``start_powerSGD_iter`` are the main hyperparameters that should be tuned by the user.
    For performance, we suggest keeping the binary hyperparameters ``use_error_feedback`` and ``warm_start`` turned on.
1. ``matrix_approximation_rank`` controls the size of compressed low-rank tensors, which determines the compression rate. The lower the rank, the stronger the compression.
    1.1. If ``matrix_approximation_rank`` is too low, the full model quality may need more training steps to be reached, or may never be reached, resulting in a loss in accuracy.
    1.2. Increasing ``matrix_approximation_rank`` can substantially increase the computation cost of the compression, and the accuracy may not be further improved beyond a certain ``matrix_approximation_rank`` threshold.
    To tune ``matrix_approximation_rank``, we suggest starting from 1 and increasing by factors of 2 (like an exponential grid search, 1, 2, 4, ...), until a satisfactory accuracy is reached. Typically only a small value 1-4 is used. For some NLP tasks (as shown in Appendix D of the original paper), this value has been increased to 32.
    2. ``start_powerSGD_iter`` defers PowerSGD compression until step ``start_powerSGD_iter``, and vanilla allreduce runs prior to step ``start_powerSGD_iter``. This hybrid scheme of **vanilla allreduce + PowerSGD** can effectively improve the accuracy, even when a relatively small ``matrix_approximation_rank`` is used. This is because the beginning of the training phase is usually very sensitive to inaccurate gradients, and compressing gradients too early may put the training on a suboptimal trajectory, which can have an irrecoverable impact on the accuracy.
    To tune ``start_powerSGD_iter``, we suggest starting with 10% of the total training steps, and increasing it until a satisfactory accuracy is reached. If there is a warm-up stage in the training, ``start_powerSGD_iter`` typically should be no less than the number of warm-up steps.
3. ``min_compression_rate`` is the minimum compression rate required when a layer is compressed. Due to the computation overheads incurred by the compression, a tensor is worth compressing only if there can be sufficient saving in bandwidth, where ``(num_rows + num_cols) * matrix_approximation_rank * min_compression_rate < num_rows * num_cols``. If the specified compression rate threshold cannot be satisfied, the tensor will be directly allreduced without compression.
Compression statistics are logged every ``compression_stats_logging_frequency`` iterations once PowerSGD compression starts.
4. ``orthogonalization_epsilon`` can be a very small value (e.g., 1e-8) added to every normalized matrix column in orthogonalization step, to prevent div-by-zero error if any column has all 0s. If this can already be prevented (e.g., by batch normalization), an epsilon of 0 is recommended for accuracy.
    5. ``batch_tensors_with_same_shape`` controls whether to compress and decompress tensors with the same shape in a batched operation to achieve higher parallelism. Note that you should also increase the bucket size (i.e., the ``bucket_cap_mb`` arg in the DDP constructor) to make more same-shaped tensors appear in the same bucket; however, this may reduce the overlap between computation and communication, and increase the memory footprint due to stacking the tensors of the same shape. Set to ``True`` if the compression / decompression computation is a bottleneck.
.. warning ::
If error feedback or warm-up is enabled, the minimum value of ``start_powerSGD_iter`` allowed in DDP is 2.
This is because there is another internal optimization that rebuilds buckets at iteration 1 in DDP,
and this can conflict with any tensor memorized before the rebuild process.
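    A minimal construction sketch (added example, not part of the original documentation;
    ``process_group`` and ``ddp_model`` are assumed to already exist, and the values shown
    are only a common starting point)::
        >>> # xdoctest: +SKIP
        >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1,
        ...                       start_powerSGD_iter=1_000, min_compression_rate=2)
        >>> ddp_model.register_comm_hook(state, powerSGD_hook)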
""" # noqa: B950
__slots__ = [
"process_group",
# The fields below are the hyperparameters that often need to be tuned by the user.
"matrix_approximation_rank",
"start_powerSGD_iter",
# The fields below are the hyperparameters that seldom need be tuned by the user.
"min_compression_rate",
"orthogonalization_epsilon",
# The fields below are the binary hyperparameters recommended to be turned on for performance and accuracy.
"use_error_feedback",
"warm_start",
"batch_tensors_with_same_shape",
# The fields below are internal state.
"rng",
"error_dict",
"p_memory_dict",
"q_memory_dict",
"iter",
# The fields below are for recording compression stats.
"total_numel_before_compression",
"total_numel_after_compression",
"compression_stats_logging_frequency",
"next_stats_report",
]
def __init__(
self,
process_group,
matrix_approximation_rank=1,
start_powerSGD_iter=1_000,
min_compression_rate=2,
use_error_feedback=True,
warm_start=True,
orthogonalization_epsilon=0,
random_seed=0,
compression_stats_logging_frequency=10_000,
batch_tensors_with_same_shape: bool = False,
):
logger.info(
"PowerSGD config: matrix_approximation_rank = {}; start_powerSGD_iter = {}; "
"min_compression_rate = {}; orthogonalization_epsilon = {}; use_error_feedback = {}; warm_start = {}; "
"random_seed = {}; compression_stats_logging_frequency = {}; batch_tensors_with_same_shape = {}".format(
matrix_approximation_rank,
start_powerSGD_iter,
min_compression_rate,
orthogonalization_epsilon,
use_error_feedback,
warm_start,
random_seed,
compression_stats_logging_frequency,
batch_tensors_with_same_shape,
)
)
self.process_group = process_group
self.matrix_approximation_rank = matrix_approximation_rank
        # Deferring PowerSGD compression until step 'start_powerSGD_iter' can have two advantages:
# 1) It turns out that PowerSGD may lead to a non-trivial accuracy loss,
# even if the matrix approximation rank is increased to a large value.
# To mitigate the accuracy loss, a simple yet effective way is mixing vanilla allreduce
# (or a more conservative compression such as FP16 compression) with PowerSGD.
# 2) There is an internal optimization of rebuilding buckets process in DDP,
# in order to save the memory space.
# This step takes place after the first iteration.
# However, this means that the shape of input bucketized tensors is subject to change,
# which will complicate the implementations of error feedback and warm-up.
# Running vanilla allreduce in the first few iterations can avoid this complexity.
if (use_error_feedback or warm_start) and start_powerSGD_iter <= 1:
raise ValueError(
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP."
)
self.start_powerSGD_iter = start_powerSGD_iter
self.min_compression_rate = min_compression_rate
        # Error feedback is usually crucial for both convergence and generalization,
# because PowerSGD is a biased compressor,
# i.e., compressing and decompressing a random gradient does not yield the original in expectation.
# This mechanism requires a temporary copy of the input gradients,
# so it increases the peak memory consumption by the size of the gradient tensor.
# However, if the target matrices are known to be exactly low-ranked (instead of just low stable rank),
# sometimes it is possible to converge to the optima without error feedback.
# See: http://proceedings.mlr.press/v54/yurtsever17a/yurtsever17a.pdf
self.use_error_feedback = use_error_feedback
# Warm-start reuses P(s) and Q(s) from the previous iteration.
# This can improve the approximation quality and hence improve the accuracy.
# Additionally, by avoiding the initialization of these low-rank tensors at every step,
# this can also accelerate training.
# However, this is at the cost of extra memory.
self.warm_start = warm_start
# Can use a very small value to prevent div-by-zero error caused by orthogonalization of vanishing gradients.
self.orthogonalization_epsilon = orthogonalization_epsilon
# The purpose of this RNG is to generate different random seeds for initializing Q across iterations,
# but in the same order for all the DDP replicas.
# Different random seeds across iterations indicate different 'projections' of the gradients at different SGD steps.
# If the same random projection is used,
# there will be differences between the gradients that are never synchronized.
self.rng = np.random.RandomState(random_seed)
# Since there is only a single state instance for all the input buckets,
# need to maintain a dictionary that maps each bucket index to the local error.
self.error_dict: Dict[int, torch.Tensor] = {}
self.p_memory_dict: Dict[int, torch.Tensor] = {}
self.q_memory_dict: Dict[int, torch.Tensor] = {}
# Iteration/step in the training loop.
self.iter = 0
# Compression stats accumulators
self.total_numel_before_compression = 0
self.total_numel_after_compression = 0
# We'll report compression stats every 'compression_stats_logging_frequency' iterations
# Note that we always report compression stats at least once.
self.compression_stats_logging_frequency = max(
1, compression_stats_logging_frequency
)
self.next_stats_report = 0
        # Batching tensors with the same shape can increase parallelism in the compression / decompression computation.
        # This requires a larger bucket size to make more same-shaped tensors appear in one bucket; however,
# this may reduce the overlap between computation and communication, and increase the memory footprint
# due to stacking tensors.
# Turn on if compression / decompression computation is a bottleneck.
self.batch_tensors_with_same_shape = batch_tensors_with_same_shape
def __getstate__(self):
r"""
Returns a ``Dict[str, Any]`` which will be pickled and saved.
``process_group`` is not serializable and excluded from
a returned state.
"""
logger.warning(
"NOTE: Process group is not serializable and excluded from a saved state."
)
return {
slot: getattr(self, slot)
for slot in self.__slots__ if slot != "process_group"
}
def __setstate__(self, state):
r"""
Takes a provided ``state`` and retrieves ``PowerSGDState``.
``process_group`` is set to default.
"""
self.process_group = distributed_c10d._get_default_group()
logger.warning(
"NOTE: Process group will be set to a default group (i.e. the world size).\
If a different group is desired, please set `self.process_group` after PowerSGD state is loaded."
)
for slot, value in state.items():
setattr(self, slot, value)
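    # Added illustration (not part of the original source): because __getstate__/__setstate__
    # drop the process group, a PowerSGD state can be checkpointed alongside the model, e.g.
    #     torch.save(powersgd_state, "powersgd_state.pt")  # hypothetical file name
    #     restored = torch.load("powersgd_state.pt")       # process_group falls back to the default group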
def maybe_increase_iter(self, bucket):
        # Since bucket 0 is the last bucket to allreduce in an iteration,
        # only increase `iter` when bucket 0 is processed.
if bucket.is_last():
self.iter += 1
if self.iter == self.start_powerSGD_iter:
logger.info(
"Start to apply PowerSGD after {} iterations.".format(self.iter)
)
def compression_stats(self):
r"""
Returns the latest compression statistics as a tuple of the form (compress_rate, numel_before_compression, numel_after_compression), where:
compress_rate is the effective compression rate i.e. (number of elements before compression) / (number of elements after compression);
numel_before_compression is the total number of elements before compression was applied; and,
numel_after_compression is the total number of elements after compression was applied.
""" # noqa: B950
compress_rate = (
self.total_numel_before_compression / self.total_numel_after_compression
if self.total_numel_after_compression > 0
else 0
)
return (
compress_rate,
self.total_numel_before_compression,
self.total_numel_after_compression,
)
def powerSGD_hook(
state: PowerSGDState, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
r"""
This DDP communication hook implements PowerSGD gradient compression
algorithm described in the `paper <https://arxiv.org/abs/1905.13727>`_.
Once gradient tensors are aggregated across all workers, this hook applies
compression as follows:
1. Views the input flattened 1D gradient tensor as a list of per-parameter tensors, and divides all the tensors into two groups:
1.1 The tensors that should be compressed before allreduce, because the compression can give enough saving in bandwidth.
    1.2 The rest of the tensors will be directly allreduced without compression, including all the vector tensors (for biases).
2. Handles uncompressed tensors:
2.1. Allocate contiguous memory for those uncompressed tensors, and allreduces all the uncompressed tensors as a batch, without compression;
2.2. Copies the individual uncompressed tensors from the contiguous memory back to the input tensor.
3. Handles the tensors that should be compressed by PowerSGD compression:
3.1. For each tensor M, creates two low-rank tensors P and Q for decomposing M,
such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized;
3.2. Computes each P in Ps, which is equal to MQ;
3.3. Allreduces Ps as a batch;
3.4. Orthogonalizes each P in Ps;
3.5. Computes each Q in Qs, which is approximately equal to M^TP;
3.6. Allreduces Qs as a batch;
3.7. Computes each M among all the compressed tensors, which is approximately equal to PQ^T.
Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations.
This not only gives the user more control over the tradeoff between speedup and accuracy,
but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers.
Args:
state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc.
To tune the compression configs, mainly need to tune ``matrix_approximation_rank``, ``start_powerSGD_iter``
and ``min_compression_rate``.
bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
Note that since DDP comm hook only supports single process single device mode,
only exactly one tensor is stored in this bucket.
Returns:
Future handler of the communication, which updates the gradients in place.
Example::
>>> # xdoctest: +SKIP
>>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1,
start_powerSGD_iter=10, min_compression_rate=0.5)
>>> ddp_model.register_comm_hook(state, powerSGD_hook)
""" # noqa: B950
process_group = state.process_group
group_to_use = process_group if process_group is not None else dist.group.WORLD
world_size = group_to_use.size()
# The input tensor is a flattened 1D tensor.
input_tensor = bucket.buffer()
# Run vanilla allreduce in the first `start_powerSGD_iter` iterations.
if state.iter < state.start_powerSGD_iter:
state.maybe_increase_iter(bucket)
return default._allreduce_fut(group_to_use, input_tensor)
# Apply PowerSGD after `start_powerSGD_iter` iterations.
device = input_tensor.device
dtype = input_tensor.dtype
# Incorporate the error from the previous state into the gradients.
bucket_index = bucket.index()
input_tensor_cp = None
total_length = input_tensor.shape[0]
if state.use_error_feedback:
if bucket_index in state.error_dict:
input_tensor.add_(state.error_dict[bucket_index])
else:
logger.info(
"A zero tensor of length {} that represents local error is created.".format(
total_length
)
)
state.error_dict[bucket_index] = torch.zeros(
total_length, device=device, dtype=dtype
)
# Keep a copy of the input tensor,
# so that we can compute the local error caused by compression later,
# by comparing this copy and the input tensor updated after decompression.
input_tensor_cp = torch.clone(input_tensor).detach()
# Unflatten the input tensor into per-parameter tensors, for layer-wise compression.
tensors = bucket.gradients()
# Step I: Divide all the tensors into two groups,
# one will be compressed before allreduce and the other will be directly allreduced without compression.
tensors_to_compress, uncompressed_tensors = [], []
total_Ps_size = 0
total_Qs_size = 0
for tensor in tensors:
matrix = tensor.view(tensor.shape[0], -1)
n, m = matrix.shape
matrix_approximation_rank = min(n, m, state.matrix_approximation_rank)
compress_test = _should_compress(
n, m, matrix_approximation_rank, state.min_compression_rate
)
state.total_numel_before_compression += compress_test[1]
if compress_test[0]:
tensors_to_compress.append(matrix)
total_Ps_size += n * matrix_approximation_rank
total_Qs_size += m * matrix_approximation_rank
state.total_numel_after_compression += compress_test[2]
else:
uncompressed_tensors.append(tensor)
state.total_numel_after_compression += compress_test[1]
_report_compression_stats(bucket, state)
# Step II: Handle uncompressed tensors.
# Allocate contiguous memory for these tensors to allreduce efficiently.
uncompressed_tensors_memory = (
torch.cat([tensor.view(-1) for tensor in uncompressed_tensors])
if uncompressed_tensors
else torch.tensor([], device=device, dtype=dtype)
)
# Step III: Handle the tensors that should be compressed.
# Allocate contiguous memory for Ps and Qs to allreduce efficiently.
# If warm-start is enabled, reuse Ps and Qs from the previous iteration if possible.
# The memory spaces of Ps and Qs need to be allocated in the first iteration when PowerSGD is applied.
need_randomize_qs = False
if not state.warm_start or bucket_index not in state.p_memory_dict:
need_randomize_qs = True
# If warm-start is disabled, low-rank tensors will be initialized at every step.
        # Only log this if warm-start is enabled, to avoid spamming.
if state.warm_start:
logger.info(
"Allocating contiguous memory of length {} for Ps, and of length {} for Qs, respectively.".format(
total_Ps_size, total_Qs_size
)
)
state.p_memory_dict[bucket_index] = torch.empty(
total_Ps_size, device=device, dtype=dtype
)
state.q_memory_dict[bucket_index] = torch.empty(
total_Qs_size, device=device, dtype=dtype
)
# Batch tensors to compress by shape.
shape_to_tensors = defaultdict(list)
for tensor in tensors_to_compress:
shape_to_tensors[tensor.shape].append(tensor)
    # This function decides whether to batch tensors with the same shape according to the argument,
    # so that the following process can share the same code.
def maybe_batched_tensors_to_compress():
for tensors in shape_to_tensors.values():
if state.batch_tensors_with_same_shape:
batch_size = len(tensors)
if batch_size == 1:
# Use the original tensor to avoid copy.
yield tensors[0].unsqueeze(0)
else:
yield torch.stack(tensors)
else:
for tensor in tensors:
yield tensor.unsqueeze(0)
# Create Ps and Qs that point to the allocated memory.
tensors_to_compress = []
ps = []
qs = []
p_idx = 0
q_idx = 0
for tensor in maybe_batched_tensors_to_compress():
batch_size, n, m = tensor.shape
matrix_approximation_rank = min(n, m, state.matrix_approximation_rank)
tensors_to_compress.append(tensor)
ps.append(
state.p_memory_dict[bucket_index][
p_idx : p_idx + batch_size * n * matrix_approximation_rank
].view(batch_size, n, matrix_approximation_rank)
)
qs.append(
state.q_memory_dict[bucket_index][
q_idx : q_idx + batch_size * m * matrix_approximation_rank
].view(batch_size, m, matrix_approximation_rank)
)
p_idx += batch_size * n * matrix_approximation_rank
q_idx += batch_size * m * matrix_approximation_rank
# If warm-start is enabled, reuse Qs from the previous iteration if possible and skip filling random values.
# The exception is the first iteration when PowerSGD is applied.
if not need_randomize_qs:
for q in qs:
_orthogonalize(q, state.orthogonalization_epsilon)
else:
with torch.random.fork_rng(devices=[]):
# Fork this RNG to avoid changing the seed globally and affecting the random sampling anywhere else in the training.
# The seed makes sure that the initial random values are the same across all the DDP replicas.
# This seed should differ at every step.
# Since it is very slow to fork RNG state across all the CUDA devices,
# only fork on CPU and then move the generated tensor to the CUDA device (by overwriting q).
torch.manual_seed(state.rng.randint(1_000_000_000))
for q in qs:
q.copy_(
torch.randn(
*q.shape,
device="cpu",
dtype=dtype,
)
)
_orthogonalize(q, state.orthogonalization_epsilon)
# Compute Ps.
for tensor, q, p in zip(tensors_to_compress, qs, ps):
torch.bmm(tensor, q, out=p)
# This allreduce is only applied to uncompressed tensors,
# so it should have been kicked off before the above computation on the compressed tensors to hide more communication costs.
# However, this somehow requires a separate future chain at this time.
allreduce_contiguous_uncompressed_tensors_fut = dist.all_reduce(
uncompressed_tensors_memory, group=group_to_use, async_op=True
).get_future()
def unpack_uncompressed_tensors_and_allreduce_ps(fut):
uncompressed_tensors_memory = fut.value()[0].div_(world_size)
idx = 0
for tensor in uncompressed_tensors:
tensor.copy_(
uncompressed_tensors_memory[idx : idx + tensor.numel()].view_as(tensor)
)
idx += tensor.numel()
# Since these Ps will be orthogonalized later, no need to divide them by world size.
return (
dist.all_reduce(
state.p_memory_dict[bucket_index], group=group_to_use, async_op=True
)
.get_future()
.wait()[0]
)
def compute_qs(fut):
state.p_memory_dict[bucket_index] = fut.value()
for p in ps:
_orthogonalize(p, state.orthogonalization_epsilon)
# Compute Qs.
for tensor, p, q in zip(tensors_to_compress, ps, qs):
torch.bmm(tensor.transpose(1, 2), p, out=q)
# TODO: The above procedure does two matmul+allreduce steps per iteration --
# one left multiplication and one right multiplication.
# For warm-start, can take one such step at a time, and alternate between them.
# Allreduce Qs.
return (
dist.all_reduce(
state.q_memory_dict[bucket_index], group=group_to_use, async_op=True
)
.get_future()
.wait()[0]
)
def decompress(fut):
state.q_memory_dict[bucket_index] = fut.value().div_(world_size)
for p, q, tensor in zip(ps, qs, tensors_to_compress):
torch.bmm(p, q.transpose(1, 2), out=tensor)
# Copy batched tensors back to original buffer.
if state.batch_tensors_with_same_shape:
for tensor in tensors_to_compress:
if tensor.shape[0] == 1:
                    # Skip tensors with batch_size == 1, since they are themselves the original tensors.
continue
original_tensors = shape_to_tensors[tensor.shape[1:]]
for i, original_tensor in enumerate(original_tensors):
original_tensor.copy_(tensor[i])
if torch.cuda.is_available():
torch.cuda.synchronize(device)
if state.use_error_feedback:
# Memorize the local errors.
state.error_dict[bucket_index] = input_tensor_cp - input_tensor
if not state.warm_start:
state.p_memory_dict.clear()
state.q_memory_dict.clear()
state.maybe_increase_iter(bucket)
return input_tensor
return (
allreduce_contiguous_uncompressed_tensors_fut.then(
unpack_uncompressed_tensors_and_allreduce_ps
)
.then(compute_qs)
.then(decompress)
)
def batched_powerSGD_hook(
state: PowerSGDState, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
r"""
This DDP communication hook implements a simplified PowerSGD gradient compression
algorithm described in the `paper <https://arxiv.org/abs/1905.13727>`_.
This variant does not compress the gradients layer by layer,
but instead compresses the flattened input tensor that batches all the gradients.
Therefore, it is **faster** than :meth:`powerSGD_hook`,
but usually results in a **much lower accuracy**, unless ``matrix_approximation_rank`` is 1.
.. warning ::
Increasing ``matrix_approximation_rank`` here may not necessarily increase the accuracy,
because batching per-parameter tensors without column/row alignment can destroy low-rank structure.
Therefore, the user should always consider :meth:`powerSGD_hook` first,
and only consider this variant when a satisfactory accuracy can be achieved when ``matrix_approximation_rank`` is 1.
Once gradient tensors are aggregated across all workers, this hook applies
compression as follows:
1. Views the input flattened 1D gradient tensor as a square-shaped tensor M with 0 paddings;
2. Creates two low-rank tensors P and Q for decomposing M, such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized;
3. Computes P, which is equal to MQ;
4. Allreduces P;
5. Orthogonalizes P;
6. Computes Q, which is approximately equal to M^TP;
7. Allreduces Q;
8. Computes M, which is approximately equal to PQ^T.
9. Truncates the input tensor to the original length.
Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations.
This not only gives the user more control over the tradeoff between speedup and accuracy,
but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers.
Args:
state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc.
To tune the compression configs, mainly need to tune ``matrix_approximation_rank`` and ``start_powerSGD_iter``.
bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
Note that since DDP comm hook only supports single process single device mode,
only exactly one tensor is stored in this bucket.
Returns:
Future handler of the communication, which updates the gradients in place.
Example::
>>> # xdoctest: +SKIP
>>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1)
>>> ddp_model.register_comm_hook(state, batched_powerSGD_hook)
""" # noqa: B950
process_group = state.process_group
group_to_use = process_group if process_group is not None else dist.group.WORLD
world_size = group_to_use.size()
# The input tensor is a flattened 1D tensor.
input_tensor = bucket.buffer()
# Run vanilla allreduce in the first `start_powerSGD_iter` iterations.
if state.iter < state.start_powerSGD_iter:
state.maybe_increase_iter(bucket)
return default._allreduce_fut(group_to_use, input_tensor)
# Apply PowerSGD after `start_powerSGD_iter` iterations.
device = input_tensor.device
total_length = input_tensor.shape[0]
state.total_numel_before_compression += total_length
# View the input tensor as a 2D square-shape tensor, and pad 0s if necessary.
square_side_length = math.ceil(math.sqrt(total_length))
state.total_numel_after_compression += (
square_side_length * state.matrix_approximation_rank * 2
)
padded_total_length = square_side_length ** 2
input_tensor.resize_(padded_total_length)
input_tensor[total_length:padded_total_length].fill_(0)
_report_compression_stats(bucket, state)
# Incorporate the error from the previous state into the gradients.
bucket_index = bucket.index()
input_tensor_cp = None
if state.use_error_feedback:
if bucket_index in state.error_dict:
input_tensor.add_(state.error_dict[bucket_index])
else:
logger.info(
"A zero tensor of length {} that represents local error is created.".format(
padded_total_length
)
)
state.error_dict[bucket_index] = torch.zeros(
padded_total_length, device=device, dtype=input_tensor.dtype
)
# Keep a copy of the input tensor,
# so that we can compute the local error caused by compression later,
# by comparing this copy and the input tensor updated after decompression.
input_tensor_cp = torch.clone(input_tensor).detach()
matrix = input_tensor.view(square_side_length, square_side_length)
# Reuse P and Q from the previous iteration if possible.
# The memory spaces of P and Q need to be allocated in the first iteration when PowerSGD is applied.
if not state.warm_start or bucket_index not in state.p_memory_dict:
# If warm-start is disabled, low-rank tensors will be initialized at every step.
        # Only log this if warm-start is enabled, to avoid spamming.
if state.warm_start:
logger.info(
"Initializing low-rank tensors P and Q, each of which has a shape of {} x {}.".format(
square_side_length, state.matrix_approximation_rank
)
)
def create_low_rank_tensor(fill_random_values, rng):
"Returns a low-rank 2D tensor of square_side_length * matrix_approximation_rank."
if fill_random_values:
with torch.random.fork_rng(devices=[]):
# Fork this RNG to avoid changing the seed globally and affecting the random sampling
# anywhere else in the training.
# The seed makes sure that the initial random values are the same across all the DDP replicas.
# This seed should differ at every step.
# Since it is very slow to fork RNG state across all the CUDA devices,
# only fork on CPU and then move the generated tensor to the CUDA device.
torch.manual_seed(rng.randint(1_000_000_000))
return torch.randn(
square_side_length,
state.matrix_approximation_rank,
device="cpu",
dtype=input_tensor.dtype,
).to(device)
else:
return torch.empty(
square_side_length,
state.matrix_approximation_rank,
device=device,
dtype=input_tensor.dtype,
)
state.p_memory_dict[bucket_index] = create_low_rank_tensor(
fill_random_values=False, rng=state.rng
)
state.q_memory_dict[bucket_index] = create_low_rank_tensor(
fill_random_values=True, rng=state.rng
)
_orthogonalize(state.q_memory_dict[bucket_index])
torch.matmul(
matrix, state.q_memory_dict[bucket_index], out=state.p_memory_dict[bucket_index]
)
allreduce_p_fut = dist.all_reduce(
state.p_memory_dict[bucket_index], group=group_to_use, async_op=True
).get_future()
def compute_q(fut):
state.p_memory_dict[bucket_index] = fut.value()[0]
_orthogonalize(state.p_memory_dict[bucket_index])
torch.matmul(
matrix.t(),
state.p_memory_dict[bucket_index],
out=state.q_memory_dict[bucket_index],
)
# TODO: The above procedure does two matmul+allreduce steps per iteration --
# one left multiplication and one right multiplication.
# For warm-start, can take one such step at a time, and alternate between them.
return (
dist.all_reduce(
state.q_memory_dict[bucket_index], group=group_to_use, async_op=True
)
.get_future()
.wait()[0]
)
def decompress(fut):
state.q_memory_dict[bucket_index] = fut.value().div_(world_size)
torch.matmul(
state.p_memory_dict[bucket_index],
state.q_memory_dict[bucket_index].t(),
out=matrix,
)
if state.use_error_feedback:
# Memorize the local errors.
state.error_dict[bucket_index] = input_tensor_cp - input_tensor
        # Removing this seemingly unnecessary sync somehow may cause failures.
# See: https://github.com/pytorch/pytorch/pull/54838
if torch.cuda.is_available():
torch.cuda.synchronize(device)
if not state.warm_start:
state.p_memory_dict.clear()
state.q_memory_dict.clear()
ret = input_tensor.resize_(total_length)
state.maybe_increase_iter(bucket)
return ret
return allreduce_p_fut.then(compute_q).then(decompress)
| pytorch-master | torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py |
from abc import ABC
import inspect
from typing import Dict, Type
from torch.distributed.fsdp import FullyShardedDataParallel
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from torch.distributed.optim import as_functional_optim
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
from torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks import (
_OptimizerHookState,
_hook_then_optimizer
)
# Contains the mappings between the regular and overlapped optimizer types.
_registered_overlapped_optims: Dict[Type, Type] = {}
def register_overlapped(optim_cls):
def decorator(target_overlapped_optim_cls):
if target_overlapped_optim_cls in _registered_overlapped_optims:
raise ValueError(
f"{target_overlapped_optim_cls} already registered with optim_cls "
f"{_registered_overlapped_optims[optim_cls]} {optim_cls}, trying to"
f"re-register it for {optim_cls} is not supported."
)
_registered_overlapped_optims[optim_cls] = target_overlapped_optim_cls
return target_overlapped_optim_cls
return decorator
class OverlappedOptimizer(ABC):
def __init__(self, optim_cls: Type) -> None:
"""
OverlappedOptimizer is a base class that child classes can implement to
specify how different optimizers will register themselves with DDP.
"""
self.optim_cls = optim_cls
def register_ddp(self, ddp: DistributedDataParallel) -> None:
"""Registers the overlapped optimizer with DDP."""
raise NotImplementedError(
f"{self.__class__.__name__} does not support overlapped DDP."
)
def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
"""Registers the overlapped optimizer with FSDP."""
raise NotImplementedError(
f"{self.__class__.__name__} does not support overlapped FSDP."
)
@register_overlapped(Optimizer)
class _OverlappedStandardOptimizer(OverlappedOptimizer):
"""Overlaps a regular ``Optimizer``."""
def __init__(self, optim_cls: Type, params, *optim_args, **optim_kwargs) -> None:
super().__init__(optim_cls)
f_optim = as_functional_optim(self.optim_cls, *optim_args, **optim_kwargs)
self._opt_hook_state = _OptimizerHookState(f_optim, params)
def register_ddp(self, ddp_inst: DistributedDataParallel):
# NOTE: using a custom communication hook and fused optimizer is not
# yet supported.
ddp_inst.register_comm_hook( # type: ignore[operator]
None, # wrapped hook state
_hook_then_optimizer(allreduce_hook, self._opt_hook_state)
)
# TODO: register_fsdp once FSDP supports communication hook.
def _as_overlapped_optim(optim_cls: Type, params, *args, **kwargs):
"""
Returns a new ``OverlappedOptimizer`` instance that supports ``optim_cls``.
"""
for clz in inspect.getmro(optim_cls):
try:
return _registered_overlapped_optims[clz](optim_cls, params, *args, **kwargs)
except KeyError:
pass
# Fallback to standard overlapped optimizer, which will raise errors if user
# is attempting to use an unsupported optimizer.
return _OverlappedStandardOptimizer(optim_cls, params, *args, **kwargs)
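# Added illustration (not part of the original source): a typical flow would be
#     overlapped = _as_overlapped_optim(torch.optim.SGD, list(ddp_model.parameters()), lr=0.1)
#     overlapped.register_ddp(ddp_model)  # fuse the optimizer step into the allreduce comm hook
# where `ddp_model` is an assumed, already-constructed DistributedDataParallel instance.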
| pytorch-master | torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py |
from .optimizer_overlap import _as_overlapped_optim
| pytorch-master | torch/distributed/algorithms/_optimizer_overlap/__init__.py |
import functools
import torch
import torch.distributed as dist
from enum import Enum
TORCH_HALF_MIN = torch.finfo(torch.float16).min
TORCH_HALF_MAX = torch.finfo(torch.float16).max
class DQuantType(Enum):
"""
Different quantization methods for auto_quantize API are identified here.
auto_quantize API currently supports fp16 and bfp16 methods.
"""
FP16 = "fp16",
BFP16 = "bfp16"
def __str__(self) -> str:
return self.value
def _fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()
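# Added illustration (not part of the original source): clamping keeps out-of-range values
# finite after the cast, e.g.
#     _fp32_to_fp16_with_clamp(torch.tensor([1e10, -1e10]))
#     # -> tensor([ 65504., -65504.], dtype=torch.float16) instead of +/-inf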
def _quantize_tensor(tensor, qtype):
if not isinstance(tensor, torch.Tensor):
raise RuntimeError(
f"_quantize_tensor expecting torch.Tensor as input but found {type(tensor)}"
)
if (qtype == DQuantType.FP16):
return _fp32_to_fp16_with_clamp(tensor)
elif (qtype == DQuantType.BFP16):
return torch.ops.quantization._FloatToBfloat16Quantized(tensor)
else:
raise RuntimeError(
f'Quantization type {qtype} is not supported'
)
def _quantize_tensor_list(tensor_list, qtype):
if not isinstance(tensor_list, list) or not all(
isinstance(p, torch.Tensor) for p in tensor_list
):
raise RuntimeError(
f"_quantize_tensor_list expecting list of torch.Tensor as input but found {type(tensor_list)}"
)
quantized_tensor_list = [_quantize_tensor(t, qtype) for t in tensor_list]
return quantized_tensor_list
def _dequantize_tensor(tensor, qtype, quant_loss=None):
if not isinstance(tensor, torch.Tensor):
raise RuntimeError(
f"_dequantize_tensor expecting torch.Tensor as input but found {type(tensor)}"
)
if (qtype == DQuantType.FP16):
if tensor.dtype != torch.float16:
raise RuntimeError(
f"tensor dtype is {tensor.dtype} while expected to be FP16."
)
elif tensor.dtype == torch.float16 and quant_loss is None:
return tensor.float()
else:
return tensor.float() / quant_loss
elif (qtype == DQuantType.BFP16):
if tensor.dtype != torch.float16:
raise RuntimeError(
f"tensor dtype is {tensor.dtype} while expected to be FP16."
)
else:
return torch.ops.quantization._Bfloat16QuantizedToFloat(tensor)
else:
raise RuntimeError(
f'Quantization type {qtype} is not supported'
)
def _dequantize_tensor_list(tensor_list, qtype, quant_loss=None):
if not isinstance(tensor_list, list) or not all(
isinstance(p, torch.Tensor) for p in tensor_list
):
raise RuntimeError(
f"_dequantize_tensor_list expecting list of torch.Tensor as input but found {type(tensor_list)}"
)
dequantized_tensor_list = [_dequantize_tensor(t, qtype) for t in tensor_list]
return dequantized_tensor_list
def auto_quantize(func, qtype, quant_loss=None):
"""
This is a prototype API that automatically quantize the input tensors, choose the precision types, and
pass other necessary arguments and then dequantizes the output.
Currently it only supports:
        . FP16 and BFP16 quantization methods supported for gloo and nccl backends
. all_gather, all_to_all collective ops
Note: BFP16 only supports 2D tensors.
Args:
func (Callable): A function representing collective operations.
qtype (QuantType): Quantization method
quant_loss (float, optional): This can be used to improve accuracy in the dequantization.
Returns:
(Callable): the same collective as func but enables automatic quantization/dequantization.
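    Example::
        >>> # xdoctest: +SKIP
        >>> # Added sketch (not part of the original docstring); assumes the default process
        >>> # group is initialized and the placeholder tensors have compatible shapes:
        >>> quantized_all_gather = auto_quantize(dist.all_gather, DQuantType.FP16)
        >>> quantized_all_gather(output_tensor_list, input_tensor)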
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
group = kwargs.get('group', None)
async_op = kwargs.get('async_op', False)
if (async_op is True):
raise RuntimeError(
'The async_op=True mode is not supported yet.'
)
if (func == dist.all_gather):
tensors = args[0]
input_tensors = _quantize_tensor(args[1], qtype)
out_tensors = _quantize_tensor_list(tensors, qtype)
dist.all_gather(out_tensors, input_tensors, group=group, async_op=async_op)
for i, t in enumerate(_dequantize_tensor_list(out_tensors, qtype, quant_loss=quant_loss)):
tensors[i] = t
elif (func == dist.all_to_all):
tensors = args[0]
input_tensors = _quantize_tensor_list(args[1], qtype)
out_tensors = _quantize_tensor_list(tensors, qtype)
dist.all_to_all(out_tensors, input_tensors, group=group, async_op=async_op)
for i, t in enumerate(_dequantize_tensor_list(out_tensors, qtype, quant_loss=quant_loss)):
tensors[i] = t
elif (func == dist.all_to_all_single):
tensors = args[0]
out_splits = kwargs.get('out_splits', None)
in_splits = kwargs.get('in_splits', None)
# Quantizing the input/output tensor
input_tensors = _quantize_tensor(args[1], qtype)
out_tensors = _quantize_tensor(tensors, qtype)
dist.all_to_all_single(out_tensors, input_tensors, out_splits, in_splits, group=group)
for i, t in enumerate(_dequantize_tensor(out_tensors, qtype, quant_loss=quant_loss)):
tensors[i] = t
else:
raise RuntimeError(
f"The collective op {func} is not supported yet"
)
return wrapper
| pytorch-master | torch/distributed/algorithms/_quantization/quantization.py |
| pytorch-master | torch/distributed/algorithms/_quantization/__init__.py |
|
"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live. The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
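
A minimal usage sketch (added example, not part of the original docstring; it assumes the RPC
framework is already initialized, and ``param_rrefs`` and ``loss`` are placeholders)::

    >>> # xdoctest: +SKIP
    >>> import torch.distributed.autograd as dist_autograd
    >>> dist_optim = DistributedOptimizer(torch.optim.SGD, param_rrefs, lr=0.05)
    >>> with dist_autograd.context() as context_id:
    ...     # run the forward pass and distributed backward pass in this context,
    ...     # then apply the optimizer step using the same context id
    ...     dist_autograd.backward(context_id, [loss])
    ...     dist_optim.step(context_id)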
"""
import torch
from torch import optim
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamw import _FunctionalAdamW
from .functional_sgd import _FunctionalSGD
from .functional_adadelta import _FunctionalAdadelta
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_adamax import _FunctionalAdamax
from .utils import as_functional_optim
# DistributedOptimizer imports torch.distributed.rpc names, so gate availability
# based on RPC being available.
if hasattr(torch._C, '_rpc_init'):
from .optimizer import DistributedOptimizer
from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer
| pytorch-master | torch/distributed/optim/__init__.py |
from typing import List, Optional, Dict
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript compatible Functional SGD Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,
# we explicitly allow the distributed optimizer pass gradients to
# the `step` function. In this way, we could separate the gradients
# and parameters and allow multithreaded trainer to update the
# parameters without data traces on accumulating to the same .grad.
# NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
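# Added illustration (not part of the original source): a functional optimizer is fed
# gradients explicitly instead of reading `param.grad`, e.g.
#     params = [torch.randn(2, 2, requires_grad=True)]
#     opt = _FunctionalSGD(params, lr=0.1)
#     grads = [torch.ones_like(params[0])]
#     opt.step(grads)  # parameters are updated in place from the passed-in gradients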
@torch.jit.script
class _FunctionalSGD(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-2,
momentum: float = 0.0,
dampening: float = 0.0,
weight_decay: float = 0.0,
nesterov: bool = False,
maximize: bool = False,
foreach: bool = False,
_allow_empty_param_list: bool = False,
):
self.defaults = {
"lr": lr,
"momentum": momentum,
"dampening": dampening,
"weight_decay": weight_decay,
}
self.nesterov = nesterov
self.maximize = maximize
self.foreach = foreach
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
def step_param(self, param: Tensor, grad: Optional[Tensor]):
""" Similar to self.step, but operates on a single parameter and
its gradient.
"""
# TODO: Once step_param interface is robust, refactor step to call
# step param on each param.
weight_decay = self.defaults['weight_decay']
momentum = self.defaults['momentum']
dampening = self.defaults['dampening']
lr = self.defaults['lr']
params = [param]
momentum_buffer_list: List[Optional[Tensor]] = []
grads = []
has_sparse_grad = False
if grad is not None:
grads.append(grad)
if grad.is_sparse:
has_sparse_grad = True
if param not in self.state:
self.state[param] = {}
state = self.state[param]
if 'momentum_buffer' not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state['momentum_buffer'])
with torch.no_grad():
F.sgd(
params,
grads,
momentum_buffer_list,
weight_decay=weight_decay,
momentum=momentum,
lr=lr,
dampening=dampening,
nesterov=self.nesterov,
maximize=self.maximize,
has_sparse_grad=has_sparse_grad,
foreach=self.foreach,
)
# update momentum_buffer in state
state = self.state[param]
momentum_buffer = momentum_buffer_list[0]
if momentum_buffer is not None:
state['momentum_buffer'] = momentum_buffer
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
momentum_buffer_list: List[Optional[Tensor]] = []
lr = self.defaults['lr']
weight_decay = self.defaults['weight_decay']
momentum = self.defaults['momentum']
dampening = self.defaults['dampening']
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
has_sparse_grad = False
for param, gradient in zip(params, gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
if gradient.is_sparse:
has_sparse_grad = True
if param not in self.state:
self.state[param] = {}
state = self.state[param]
if 'momentum_buffer' not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state['momentum_buffer'])
with torch.no_grad():
F.sgd(params_with_grad,
grads,
momentum_buffer_list,
weight_decay=weight_decay,
momentum=momentum,
lr=lr,
dampening=dampening,
nesterov=self.nesterov,
maximize=self.maximize,
has_sparse_grad=has_sparse_grad,
foreach=self.foreach,
)
# update momentum_buffers in state
for i, p in enumerate(params_with_grad):
state = self.state[p]
momentum_buffer = momentum_buffer_list[i]
if momentum_buffer is not None:
state['momentum_buffer'] = momentum_buffer
| pytorch-master | torch/distributed/optim/functional_sgd.py |
from typing import List, Dict, Optional
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript compatible Functional Adagrad Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,
# we explicitly let the user pass gradients to the `step` function
# this is so that we could separate the gradients and parameters
# and allow multithreaded trainer to update the parameters
# without data traces on accumulating to the same .grad.
# NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalAdagrad(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-2,
lr_decay: float = 0.0,
weight_decay: float = 0.0,
initial_accumulator_value: float = 0.0,
warmup_lr_multiplier: float = 1.0,
warmup_num_iters: float = 0.0,
eps: float = 1e-10,
coalesce_grad: bool = True,
foreach: bool = False,
maximize: bool = False,
_allow_empty_param_list: bool = False,
):
self.defaults = {
"lr": lr,
"lr_decay": lr_decay,
"eps": eps,
"weight_decay": weight_decay,
"initial_accumulator_value": initial_accumulator_value,
"warmup_lr_multiplier": warmup_lr_multiplier,
"warmup_num_iters": warmup_num_iters,
}
self.coalesce_grad = coalesce_grad
self.foreach = foreach
self.maximize = maximize
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
# TODO: no union or any types in TorchScript, make step a scalar tensor instead
        # This is also needed if we want to share_memory on the step across processes
for p in self.param_group["params"]:
self.state[p] = {
"sum": torch.full_like(p.data, initial_accumulator_value),
"step": torch.tensor(0.0),
}
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
state_sums = []
state_steps: List[Tensor] = []
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
has_sparse_grad = False
for param, gradient in zip(self.param_group['params'], gradients):
if gradient is not None:
if gradient.is_sparse:
has_sparse_grad = True
params_with_grad.append(param)
grads.append(gradient)
state = self.state[param]
state_sums.append(state['sum'])
state_steps.append(state['step'])
with torch.no_grad():
F.adagrad(params,
grads,
state_sums,
state_steps,
lr=self.defaults['lr'],
weight_decay=self.defaults['weight_decay'],
lr_decay=self.defaults['lr_decay'],
eps=self.defaults['eps'],
has_sparse_grad=has_sparse_grad,
foreach=self.foreach,
maximize=self.maximize)
| pytorch-master | torch/distributed/optim/functional_adagrad.py |
from typing import List, Dict, Optional, Tuple
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript compatible Functional AdamW Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,
# we explicitly allow the distributed optimizer to pass gradients to
# the `step` function. In this way, we could separate the gradients
# and parameters and allow multithreaded trainer to update the
# parameters without data traces on accumulating to the same .grad.
# NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
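# Added illustration (not part of the original source): besides `step`, this class exposes
# `step_param`, which updates a single parameter from its gradient so that an overlapped
# DDP hook can run the optimizer per bucket, e.g. (placeholder names)
#     opt = _FunctionalAdamW(list(model.parameters()))
#     opt.step_param(param, grad)  # update one parameter as soon as its gradient is ready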
@torch.jit.script
class _FunctionalAdamW(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 1e-2,
amsgrad: bool = False,
maximize: bool = False,
foreach: bool = False,
_allow_empty_param_list: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
self.defaults = {
"lr": lr,
"eps": eps,
"beta1": betas[0],
"beta2": betas[1],
"weight_decay": weight_decay,
}
self.amsgrad = amsgrad
self.maximize = maximize
self.foreach = foreach
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
def step_param(self, param: Tensor, grad: Optional[Tensor]):
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps: List[Tensor] = []
if grad is not None:
params_with_grad.append(param)
grads.append(grad)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
if self.amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if self.amsgrad:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
state_steps.append(state['step'])
with torch.no_grad():
F.adamw(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=self.amsgrad,
maximize=self.maximize,
beta1=self.defaults['beta1'],
beta2=self.defaults['beta2'],
lr=self.defaults['lr'],
weight_decay=self.defaults['weight_decay'],
eps=self.defaults['eps'],
foreach=self.foreach)
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps: List[Tensor] = []
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(self.param_group['params'], gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
if self.amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if self.amsgrad:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
state_steps.append(state['step'])
with torch.no_grad():
F.adamw(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=self.amsgrad,
maximize=self.maximize,
beta1=self.defaults['beta1'],
beta2=self.defaults['beta2'],
lr=self.defaults['lr'],
weight_decay=self.defaults['weight_decay'],
eps=self.defaults['eps'],
foreach=self.foreach)
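# A minimal usage sketch (illustrative, not part of the upstream module) of the
# per-parameter entry point: ``step_param`` updates a single tensor from an
# explicitly supplied gradient and lazily creates its Adam state on first use.
# Shapes and values are arbitrary assumptions.
if __name__ == "__main__":
    p = torch.randn(3, 3)
    opt = _FunctionalAdamW([p])
    g = torch.full_like(p, 0.5)
    opt.step_param(p, g)  # updates only this parameter, in place
    opt.step_param(p, g)  # later calls reuse the lazily created state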
| pytorch-master | torch/distributed/optim/functional_adamw.py |
from typing import List, Dict, Optional, Tuple
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript compatible Functional Adam Optimizer
# that is used in a functional way.
# Instead of reading `param.grad` when updating parameters,
# we explicitly let the distributed optimizer pass gradients to
# the `step` function. This separates the gradients from the
# parameters and lets multithreaded trainers update the parameters
# without data races from accumulating into the same `.grad`.
# NOTE: This should only be used by distributed optimizer internals
# and is not meant to be exposed to the user.
@torch.jit.script
class _FunctionalAdam(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.0,
amsgrad: bool = False,
maximize: bool = False,
foreach: bool = False,
_allow_empty_param_list: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
self.defaults = {
"lr": lr,
"eps": eps,
"beta1": betas[0],
"beta2": betas[1],
"weight_decay": weight_decay,
}
self.amsgrad = amsgrad
self.maximize = maximize
self.foreach = foreach
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
def step_param(self, param: Tensor, grad: Optional[Tensor]):
"""
Similar to step, but operates on a single parameter and optionally a
gradient tensor.
"""
params = [param]
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps: List[Tensor] = []
if grad is not None:
params_with_grad.append(param)
grads.append(grad)
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
if self.amsgrad:
state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if self.amsgrad:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
state_steps.append(state['step'])
with torch.no_grad():
F.adam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=self.amsgrad,
maximize=self.maximize,
beta1=self.defaults['beta1'],
beta2=self.defaults['beta2'],
lr=self.defaults['lr'],
weight_decay=self.defaults['weight_decay'],
eps=self.defaults['eps'],
foreach=self.foreach)
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps: List[Tensor] = []
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(self.param_group['params'], gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
if self.amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if self.amsgrad:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
state_steps.append(state['step'])
with torch.no_grad():
F.adam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=self.amsgrad,
maximize=self.maximize,
beta1=self.defaults['beta1'],
beta2=self.defaults['beta2'],
lr=self.defaults['lr'],
weight_decay=self.defaults['weight_decay'],
eps=self.defaults['eps'],
foreach=self.foreach)
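# A minimal usage sketch (illustrative, not part of the upstream module):
# ``step`` expects one gradient slot per parameter, and a ``None`` slot means
# that parameter is skipped for this step. Shapes and values are arbitrary
# assumptions.
if __name__ == "__main__":
    p1, p2 = torch.randn(2), torch.randn(2)
    opt = _FunctionalAdam([p1, p2])
    frozen = p2.clone()
    opt.step([torch.ones_like(p1), None])  # only p1 receives an update
    assert torch.equal(p2, frozen)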
| pytorch-master | torch/distributed/optim/functional_adam.py |
from typing import Type
from torch import optim
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamw import _FunctionalAdamW
from .functional_sgd import _FunctionalSGD
from .functional_adadelta import _FunctionalAdadelta
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_adamax import _FunctionalAdamax
# Dict that maps a user-passed optimizer_class to a functional
# optimizer class if one has already been defined inside the
# distributed.optim package. This lets us hide the functional
# optimizer from the user while still providing the same API.
functional_optim_map = {
optim.Adagrad: _FunctionalAdagrad,
optim.Adam: _FunctionalAdam,
optim.AdamW: _FunctionalAdamW,
optim.SGD: _FunctionalSGD,
optim.Adadelta: _FunctionalAdadelta,
optim.RMSprop: _FunctionalRMSprop,
optim.Rprop: _FunctionalRprop,
optim.Adamax: _FunctionalAdamax,
}
def register_functional_optim(key, optim):
"""
    Interface to insert a new functional optimizer into ``functional_optim_map``.
    ``fn_optim_key`` and ``fn_optimizer`` are user defined. The key and optimizer
    need not be of type :class:`torch.optim.Optimizer` (e.g. for custom optimizers).
Example::
>>> # import the new functional optimizer
>>> # xdoctest: +SKIP
>>> from xyz import fn_optimizer
>>> from torch.distributed.optim.utils import register_functional_optim
>>> fn_optim_key = "XYZ_optim"
>>> register_functional_optim(fn_optim_key, fn_optimizer)
"""
if key not in functional_optim_map:
functional_optim_map[key] = optim
def as_functional_optim(optim_cls: Type, *args, **kwargs):
try:
functional_cls = functional_optim_map[optim_cls]
except KeyError:
raise ValueError(f"Optimizer {optim_cls} does not have a functional counterpart!")
return _create_functional_optim(functional_cls, *args, **kwargs)
def _create_functional_optim(functional_optim_cls: Type, *args, **kwargs):
return functional_optim_cls(
[],
*args,
**kwargs,
_allow_empty_param_list=True,
)
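# A minimal usage sketch (illustrative, not part of the upstream module): map an
# eager optimizer class to its functional counterpart and feed it parameters
# lazily. The learning rate and tensor shape are arbitrary assumptions.
if __name__ == "__main__":
    import torch

    fn_adam = as_functional_optim(optim.Adam, lr=1e-3)  # -> _FunctionalAdam with an empty param list
    w = torch.randn(4)
    fn_adam.step_param(w, torch.ones_like(w))  # parameters and gradients are supplied per call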
| pytorch-master | torch/distributed/optim/utils.py |
from typing import List, Dict, Optional, Tuple
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript compatible Functional Rprop Optimizer
# that is used in a functional way.
# Instead of reading `param.grad` when updating parameters,
# we explicitly let the distributed optimizer pass gradients to
# the `step` function. This separates the gradients from the
# parameters and lets multithreaded trainers update the parameters
# without data races from accumulating into the same `.grad`.
# NOTE: This should only be used by distributed optimizer internals
# and is not meant to be exposed to the user.
@torch.jit.script
class _FunctionalRprop(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-2,
etas: Tuple[float, float] = (0.5, 1.2),
step_sizes: Tuple[float, float] = (1e-6, 50),
foreach: bool = False,
maximize: bool = False,
_allow_empty_param_list: bool = False,
):
self.defaults = {
"lr": lr,
}
self.etas = etas
self.step_sizes = step_sizes
self.foreach = foreach
self.maximize = maximize
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
prevs = []
step_sizes = []
lr = self.defaults['lr']
etaminus, etaplus = self.etas
step_size_min, step_size_max = self.step_sizes
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(params, gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
state['prev'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state['step_size'] = torch.full_like(gradient, lr)
state = self.state[param]
prevs.append(state['prev'])
step_sizes.append(state['step_size'])
state['step'] += 1
with torch.no_grad():
F.rprop(params_with_grad,
grads,
prevs,
step_sizes,
step_size_min=step_size_min,
step_size_max=step_size_max,
etaminus=etaminus,
etaplus=etaplus,
foreach=self.foreach,
maximize=self.maximize)
| pytorch-master | torch/distributed/optim/functional_rprop.py |
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
import warnings
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
    Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
    This optimizer runs the local optimizer at every step.
    After the warm-up stage, it averages parameters periodically after the local optimizer is applied.
Args:
optim: The local optimizer.
averager: A model averager instance to run post-localSGD algorithm.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>> from torch.distributed.optim import PostLocalSGDOptimizer
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>>
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>>
>>> # Register a post-localSGD communication hook.
>>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Create a post-localSGD optimizer that wraps a local optimizer.
>>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
>>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
>>> opt = PostLocalSGDOptimizer(
>>> optim=local_optim,
>>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> )
>>>
>>> # In the first 100 steps, DDP runs global gradient averaging at every step.
>>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
>>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
>>> for step in range(0, 200):
>>> opt.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> opt.step()
"""
def __init__(
self,
optim: torch.optim.Optimizer,
averager: averagers.ModelAverager
):
self.optim = optim
self.param_groups = self.optim.param_groups
self.averager = averager
@property
def state(self):
return self.optim.state
def __repr__(self):
return self.optim.__repr__()
def state_dict(self):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
        but adds an extra entry to record the model averager's step in the checkpoint
        so that reloading does not trigger an unnecessary warm-up again.
"""
optim_state_dict = self.optim.state_dict()
optim_state_dict['step'] = self.averager.step
return optim_state_dict
def load_state_dict(self, state_dict):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
        but also restores the model averager's step value to the one
        saved in the provided ``state_dict``.
        If there is no ``"step"`` entry in ``state_dict``,
        it will issue a warning and initialize the model averager's step to 0.
"""
self.optim.load_state_dict(state_dict)
if 'step' in state_dict:
self.averager.step = state_dict['step']
else:
warnings.warn("Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0.")
self.averager.step = 0
def step(self):
r"""
Performs a single optimization step (parameter update).
"""
self.optim.step()
self.averager.average_parameters(params=self.param_groups)
def zero_grad(self, set_to_none: bool = False): # type: ignore[override]
self.optim.zero_grad(set_to_none=set_to_none)
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
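# A minimal sketch (illustrative, not part of the upstream module) of the extra
# ``step`` entry in the checkpoint: saving and reloading preserves the averager's
# step counter, so the warm-up stage is not repeated. It assumes ``opt`` is a
# PostLocalSGDOptimizer built as in the docstring example above (which requires
# an initialized process group), so it is shown only as comments:
#
#     ckpt = opt.state_dict()                  # regular optimizer state plus 'step'
#     torch.save(ckpt, "ckpt.pt")              # "ckpt.pt" is a hypothetical path
#     opt.load_state_dict(torch.load("ckpt.pt"))
#     assert opt.averager.step == ckpt['step']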
| pytorch-master | torch/distributed/optim/post_localSGD_optimizer.py |
from typing import List, Optional
import logging
import torch
import torch.distributed.rpc as rpc
import torch.jit as jit
import torch.nn as nn
from torch import Tensor
from torch.distributed.rpc import RRef
from .utils import functional_optim_map
import torch.distributed.autograd as dist_autograd
from collections import defaultdict
from threading import Lock
__all__ = ['DistributedOptimizer']
logger = logging.getLogger(__name__)
# XXX: we define a _ScriptLocalOptimizer here to explicitly
# compile the FunctionalOptimizer class into TorchScript.
# This is because a ScriptClass instance still lives in
# Python unless you explicitly compile it as an attribute
# of a ScriptModule or pass it to a ScriptFunction.
# _ScriptLocalOptimizerInterface serves as a common
# interface type for optimizer ScriptModules.
#
# TODO (wanchaol): remove this once we add TorchScript
# class reference semantics
@jit.interface
class _ScriptLocalOptimizerInterface(object):
def step(self, autograd_ctx_id: int) -> None:
pass
class _ScriptLocalOptimizer(nn.Module):
    # TorchScript does not support multithreaded concurrent compilation.
    # request_callback might invoke concurrent compilation, so we
    # serialize compilation with a lock.
compile_lock = Lock()
def __init__(self, optim_cls, local_params_rref, *args, **kwargs):
super().__init__()
self._local_params = [rref.local_value() for rref in local_params_rref]
self.optim = optim_cls(
self._local_params,
*args,
**kwargs)
@jit.export
def step(self, autograd_ctx_id: int):
all_local_grads = dist_autograd.get_gradients(autograd_ctx_id)
# apply functional optimizer step with a list of gradients
grads: List[Optional[Tensor]] = [
all_local_grads[p] if p in all_local_grads else None
for p in self._local_params
]
self.optim.step(grads)
# TODO (wanchaol): remove/merge this with ScriptLocalOptimizer once
# we have converted all to functional optimizer in distributed.optim
class _LocalOptimizer(object):
# Ideally we would only need to share a lock for instances of
# _LocalOptimizer that deal with the same parameters. We are
# making a simplifying assumption here that if there is more
# than one instance of _LocalOptimizer per worker, they will
# be optimizing the same parameters (e.g. each data parallel
# trainer will create its own instance of _LocalOptimizer but
# they will all optimize the same parameters on each worker)
global_lock = Lock()
def __init__(self, optim_cls, local_params_rref, *args, **kwargs):
self._local_params = [rref.local_value() for rref in local_params_rref]
self.optim = optim_cls(
self._local_params,
*args,
**kwargs)
def step(self, autograd_ctx_id):
all_local_grads = dist_autograd.get_gradients(autograd_ctx_id)
with _LocalOptimizer.global_lock:
for param, grad in all_local_grads.items():
param.grad = grad
self.optim.step()
def _new_local_optimizer(optim_cls, local_params_rref, *args, **kwargs):
return rpc.RRef(
_LocalOptimizer(optim_cls, local_params_rref, *args, **kwargs))
def _local_optimizer_step(local_optim_rref, autograd_ctx_id):
local_optim = local_optim_rref.local_value()
local_optim.step(autograd_ctx_id)
# new/step functions combined with _ScriptLocalOptimizer to provide GIL-free optimizer
def _new_script_local_optimizer(optim_cls, local_params_rref, *args, **kwargs):
optim = _ScriptLocalOptimizer(optim_cls, local_params_rref, *args, **kwargs)
with _ScriptLocalOptimizer.compile_lock:
script_optim = jit.script(optim)
return rpc.RRef(
script_optim, _ScriptLocalOptimizerInterface)
@jit.script
def _script_local_optimizer_step(
local_optim_rref: RRef[_ScriptLocalOptimizerInterface],
autograd_ctx_id: int
) -> None:
local_optim = local_optim_rref.local_value()
local_optim.step(autograd_ctx_id)
def _wait_for_all(rpc_futs):
# TODO: improve error propagation
exception = None
results = []
for fut in rpc_futs:
try:
results.append(fut.wait())
except Exception as e:
results.append(e)
exception = e
if exception is not None:
raise exception
return results
class DistributedOptimizer:
"""
DistributedOptimizer takes remote references to parameters scattered
across workers and applies the given optimizer locally for each parameter.
This class uses :meth:`~torch.distributed.autograd.get_gradients` in order
to retrieve the gradients for specific parameters.
Concurrent calls to
:meth:`~torch.distributed.optim.DistributedOptimizer.step`,
either from the same or different clients, will
be serialized on each worker -- as each worker's optimizer can only work
on one set of gradients at a time. However, there is no guarantee that
the full forward-backward-optimizer sequence will execute for one client
at a time. This means that the gradients being applied may not correspond
to the latest forward pass executed on a given worker. Also, there is no
guaranteed ordering across workers.
`DistributedOptimizer` creates the local optimizer with TorchScript enabled
by default, so that optimizer updates are not blocked by the Python Global
Interpreter Lock (GIL) in the case of multithreaded training (e.g. Distributed
Model Parallel). This feature is currently enabled for most optimizers. You
can also follow `the recipe`__ in PyTorch tutorials to enable TorchScript support
for your own custom optimizers.
Args:
optimizer_class (optim.Optimizer): the class of optimizer to
instantiate on each worker.
params_rref (list[RRef]): list of RRefs to local or remote parameters
to optimize.
args: arguments to pass to the optimizer constructor on each worker.
kwargs: arguments to pass to the optimizer constructor on each worker.
Example::
>>> import torch.distributed.autograd as dist_autograd
>>> import torch.distributed.rpc as rpc
>>> from torch import optim
>>> from torch.distributed.optim import DistributedOptimizer
>>>
>>> # xdoctest: +SKIP
>>> with dist_autograd.context() as context_id:
>>> # Forward pass.
>>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
>>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
>>> loss = rref1.to_here() + rref2.to_here()
>>>
>>> # Backward pass.
>>> dist_autograd.backward(context_id, [loss.sum()])
>>>
>>> # Optimizer.
>>> dist_optim = DistributedOptimizer(
>>> optim.SGD,
>>> [rref1, rref2],
>>> lr=0.05,
>>> )
>>> dist_optim.step(context_id)
__ https://github.com/pytorch/tutorials/pull/1465
"""
def __init__(self, optimizer_class, params_rref, *args, **kwargs):
torch._C._log_api_usage_once("torch.distributed.optim.DistributedOptimizer")
per_worker_params_rref = defaultdict(list)
for param in params_rref:
per_worker_params_rref[param.owner()].append(param)
if optimizer_class in functional_optim_map and jit._state._enabled:
optim_ctor = functional_optim_map.get(optimizer_class)
else:
optim_ctor = optimizer_class
self.is_functional_optim = (optim_ctor != optimizer_class)
if self.is_functional_optim:
optimizer_new_func = _new_script_local_optimizer
else:
            logger.warning(
                f"Creating the optimizer {optimizer_class} without TorchScript support; "
                "this might result in slow computation time in a multithreaded environment "
                "(i.e. Distributed Model Parallel training on CPU) due to Python's "
                "Global Interpreter Lock (GIL). Please file an issue if you need this "
                "optimizer in TorchScript. "
            )
optimizer_new_func = _new_local_optimizer
remote_optim_futs = []
for worker, param_rrefs in per_worker_params_rref.items():
remote_optim_rref_fut = rpc.rpc_async(
worker,
optimizer_new_func,
args=(optim_ctor, param_rrefs) + args,
kwargs=kwargs,
)
remote_optim_futs.append(remote_optim_rref_fut)
self.remote_optimizers = _wait_for_all(remote_optim_futs)
def step(self, context_id):
"""
Performs a single optimization step.
This will call :meth:`torch.optim.Optimizer.step` on each worker
containing parameters to be optimized, and will block until all workers
return. The provided ``context_id`` will be used to retrieve the
corresponding :class:`~torch.distributed.autograd.context` that
contains the gradients that should be applied to the parameters.
Args:
context_id: the autograd context id for which we should run the
optimizer step.
"""
dist_autograd._is_valid_context(context_id)
if self.is_functional_optim:
optimizer_step_func = _script_local_optimizer_step
else:
optimizer_step_func = _local_optimizer_step
rpc_futs = []
for optimizer in self.remote_optimizers:
rpc_futs.append(rpc.rpc_async(
optimizer.owner(),
optimizer_step_func,
args=(optimizer, context_id),
))
_wait_for_all(rpc_futs)
| pytorch-master | torch/distributed/optim/optimizer.py |
from typing import List, Dict, Optional
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript compatible Functional Adadelta Optimizer
# that is used in a functional way.
# Instead of reading `param.grad` when updating parameters,
# we explicitly let the distributed optimizer pass gradients to
# the `step` function. This separates the gradients from the
# parameters and lets multithreaded trainers update the parameters
# without data races from accumulating into the same `.grad`.
# NOTE: This should only be used by distributed optimizer internals
# and is not meant to be exposed to the user.
@torch.jit.script
class _FunctionalAdadelta(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1.0,
rho: float = 0.9,
eps: float = 1e-6,
weight_decay: float = 0.0,
foreach: bool = False,
maximize: bool = False,
_allow_empty_param_list: bool = False,
):
self.defaults = {
"lr": lr,
"rho": rho,
"eps": eps,
"weight_decay": weight_decay,
}
self.foreach = foreach
self.maximize = maximize
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
square_avgs = []
acc_deltas = []
lr = self.defaults['lr']
rho = self.defaults['rho']
eps = self.defaults['eps']
weight_decay = self.defaults['weight_decay']
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(params, gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
state['square_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state['acc_delta'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
square_avgs.append(state['square_avg'])
acc_deltas.append(state['acc_delta'])
with torch.no_grad():
F.adadelta(params_with_grad,
grads,
square_avgs,
acc_deltas,
lr=lr,
rho=rho,
eps=eps,
weight_decay=weight_decay,
foreach=self.foreach,
maximize=self.maximize)
| pytorch-master | torch/distributed/optim/functional_adadelta.py |
from typing import List, Dict, Optional
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript compatible Functional RMSprop Optimizer
# that is used in a functional way.
# Instead of reading `param.grad` when updating parameters,
# we explicitly let the distributed optimizer pass gradients to
# the `step` function. This separates the gradients from the
# parameters and lets multithreaded trainers update the parameters
# without data races from accumulating into the same `.grad`.
# NOTE: This should only be used by distributed optimizer internals
# and is not meant to be exposed to the user.
@torch.jit.script
class _FunctionalRMSprop(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-2,
alpha: float = 0.99,
eps: float = 1e-8,
weight_decay: float = 0.0,
momentum: float = 0.0,
centered: bool = False,
foreach: bool = False,
maximize: bool = False,
_allow_empty_param_list: bool = False,
):
self.defaults = {
"lr": lr,
"alpha": alpha,
"eps": eps,
"weight_decay": weight_decay,
"momentum": momentum,
}
self.centered = centered
self.foreach = foreach
self.maximize = maximize
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
square_avgs = []
grad_avgs = []
momentum_buffer_list = []
lr = self.defaults['lr']
alpha = self.defaults['alpha']
eps = self.defaults['eps']
momentum = self.defaults['momentum']
weight_decay = self.defaults['weight_decay']
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(params, gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
state['square_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
if momentum > 0:
state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
if self.centered:
state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
square_avgs.append(state['square_avg'])
if momentum > 0:
momentum_buffer_list.append(state['momentum_buffer'])
if self.centered:
grad_avgs.append(state['grad_avg'])
state['step'] += 1
with torch.no_grad():
F.rmsprop(params_with_grad,
grads,
square_avgs,
grad_avgs,
momentum_buffer_list,
lr=lr,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=self.centered,
foreach=self.foreach,
maximize=self.maximize)
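# A minimal usage sketch (illustrative, not part of the upstream module): the
# optional state buffers are only allocated when the corresponding feature is
# enabled, i.e. ``momentum_buffer`` for momentum > 0 and ``grad_avg`` for
# centered=True. Hyperparameter values are arbitrary assumptions.
if __name__ == "__main__":
    w = torch.randn(5)
    opt = _FunctionalRMSprop([w], momentum=0.9, centered=True)
    # The first step lazily creates square_avg plus, because of the flags above,
    # momentum_buffer and grad_avg for this parameter.
    opt.step([torch.ones_like(w)])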
| pytorch-master | torch/distributed/optim/functional_rmsprop.py |