diff --git a/ckpts/universal/global_step120/zero/20.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/20.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..d0f98e5e47d1c36a4298572cc83ff3b39c0aa857 --- /dev/null +++ b/ckpts/universal/global_step120/zero/20.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a1c89e3ff5511b8fa237a7bba96056bab5abff289d3c9d0f1c229fd0d3419d0 +size 9372 diff --git a/ckpts/universal/global_step120/zero/26.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/26.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..f531958c1805df26c356df3bff088b1ca78a30a8 --- /dev/null +++ b/ckpts/universal/global_step120/zero/26.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b819bb73fe44f2e29901c6f9c69ae883dcabf3ea541886f97df942ac35fef34 +size 16778396 diff --git a/ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..4026c7b33a9ce2a1a95fcc9b710b8f83370cde72 --- /dev/null +++ b/ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eaab1a7e808df17a1d34eefeac1ee7b7eb59defa7f345ebd3f3c07b25e4b942 +size 16778396 diff --git a/ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..05d096ca112ee637403568b0001d7193b4b4376f --- /dev/null +++ b/ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e9031785271cec36db29f863f61e9029a4f14e4d7537c671120d928f317f383 +size 16778411 diff --git a/ckpts/universal/global_step120/zero/6.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/6.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..ff7c8a2305ba2141db2b9bde3a020accfec08807 --- /dev/null +++ b/ckpts/universal/global_step120/zero/6.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58ef333cc07c1471ee20910b212dadb8abefc865e1669d89095e2e27e4012369 +size 16778317 diff --git a/ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..bf1d30ca6b685c38c3b0038c2de9a174a7a1d78e --- /dev/null +++ b/ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd79f294691648c9aa9fe0907d4dbb6b941e0c291b598b7c3934672d1c75b917 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..d1c8571dc62ab57f9f5460c1881d03cd19a70f61 --- /dev/null +++ b/ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:625299ec80a1d7781e1e3686abe78807fd2694fded23b0a2099a286777987d8b +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4842c38ec08e0867327d2ce6f37b3d229e4dbb0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5e1a816faeca71372920198da9562cb4a84ffdc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98f3104ec1fd9f0f720176aea115f40a23359908 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb9b263271ec030787f10e829424bae0e65c507f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db6cb639ef1c6efd200f2f415e965a3a84f6d5b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py @@ -0,0 +1,134 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Module contains events processing mechanisms that are integrated with the standard python logging. + +Example of usage: + +:: + + from torch.distributed.elastic import events + event = events.Event(name="test_event", source=events.EventSource.WORKER, metadata={...}) + events.get_logging_handler(destination="console").info(event) + +""" + +import inspect +import logging +import os +import socket +import traceback +from enum import Enum +from typing import Dict, Optional + +from torch.distributed.elastic.events.handlers import get_logging_handler + +from .api import ( # noqa: F401 + Event, + EventMetadataValue, + EventSource, + NodeState, + RdzvEvent, +) + +_events_loggers: Dict[str, logging.Logger] = {} + +def _get_or_create_logger(destination: str = "null") -> logging.Logger: + """ + Construct python logger based on the destination type or extends if provided. 
+ + Available destination could be found in ``handlers.py`` file. + The constructed logger does not propagate messages to the upper level loggers, + e.g. root logger. This makes sure that a single event can be processed once. + + Args: + destination: The string representation of the event handler. + Available handlers found in ``handlers`` module + """ + global _events_loggers + + if destination not in _events_loggers: + _events_logger = logging.getLogger(f"torchelastic-events-{destination}") + _events_logger.setLevel(os.environ.get("LOGLEVEL", "INFO")) + # Do not propagate message to the root logger + _events_logger.propagate = False + + logging_handler = get_logging_handler(destination) + _events_logger.addHandler(logging_handler) + + # Add the logger to the global dictionary + _events_loggers[destination] = _events_logger + + return _events_loggers[destination] + + +def record(event: Event, destination: str = "null") -> None: + _get_or_create_logger(destination).info(event.serialize()) + +def record_rdzv_event(event: RdzvEvent) -> None: + _get_or_create_logger("dynamic_rendezvous").info(event.serialize()) + + +def construct_and_record_rdzv_event( + run_id: str, + message: str, + node_state: NodeState, + name: str = "", + hostname: str = "", + pid: Optional[int] = None, + master_endpoint: str = "", + local_id: Optional[int] = None, + rank: Optional[int] = None, +) -> None: + # We don't want to perform an extra computation if not needed. + if isinstance(get_logging_handler("dynamic_rendezvous"), logging.NullHandler): + return + + # Set up parameters. + if not hostname: + hostname = socket.getfqdn() + if not pid: + pid = os.getpid() + + # Determines which file called this function. + callstack = inspect.stack() + filename = "no_file" + if len(callstack) > 1: + stack_depth_1 = callstack[1] + filename = os.path.basename(stack_depth_1.filename) + if not name: + name = stack_depth_1.function + + # Delete the callstack variable. If kept, this can mess with python's + # garbage collector as we are holding on to stack frame information in + # the inspect module. + del callstack + + # Set up error trace if this is an exception + if node_state == NodeState.FAILED: + error_trace = traceback.format_exc() + else: + error_trace = "" + + # Initialize event object + event = RdzvEvent( + name=f"{filename}:{name}", + run_id=run_id, + message=message, + hostname=hostname, + pid=pid, + node_state=node_state, + master_endpoint=master_endpoint, + rank=rank, + local_id=local_id, + error_trace=error_trace, + ) + + # Finally, record the event. 
+ record_rdzv_event(event) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..786475180f8dc2630874a7c754d7980e63bbde4b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10cf52d37c45bc3404fcc29d2304ce09413ecb9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d3e1de4470561a99b750cbb5b21108308a501b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py new file mode 100644 index 0000000000000000000000000000000000000000..62f5d7500922ceb035f1f6229e6edb93acfb1922 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import json +from dataclasses import asdict, dataclass, field +from enum import Enum +from typing import Dict, Union, Optional + +__all__ = ['EventSource', 'Event', 'NodeState', 'RdzvEvent'] + +EventMetadataValue = Union[str, int, float, bool, None] + + +class EventSource(str, Enum): + """Known identifiers of the event producers.""" + + AGENT = "AGENT" + WORKER = "WORKER" + + +@dataclass +class Event: + """ + The class represents the generic event that occurs during the torchelastic job execution. + + The event can be any kind of meaningful action. + + Args: + name: event name. + source: the event producer, e.g. agent or worker + timestamp: timestamp in milliseconds when event occurred. + metadata: additional data that is associated with the event. 
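+
+    A minimal construction sketch (the field values here are illustrative,
+    not taken from a real job)::
+
+        event = Event(
+            name="checkpoint_saved",
+            source=EventSource.WORKER,
+            metadata={"step": 120},
+        )
+        payload = event.serialize()            # JSON string
+        restored = Event.deserialize(payload)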
+ """ + + name: str + source: EventSource + timestamp: int = 0 + metadata: Dict[str, EventMetadataValue] = field(default_factory=dict) + + def __str__(self): + return self.serialize() + + @staticmethod + def deserialize(data: Union[str, "Event"]) -> "Event": + if isinstance(data, Event): + return data + if isinstance(data, str): + data_dict = json.loads(data) + data_dict["source"] = EventSource[data_dict["source"]] # type: ignore[possibly-undefined] + return Event(**data_dict) + + def serialize(self) -> str: + return json.dumps(asdict(self)) + + +class NodeState(str, Enum): + """The states that a node can be in rendezvous.""" + + INIT = "INIT" + RUNNING = "RUNNING" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + + +@dataclass +class RdzvEvent: + """ + Dataclass to represent any rendezvous event. + + Args: + name: Event name. (E.g. Current action being performed) + run_id: The run id of the rendezvous + message: The message describing the event + hostname: Hostname of the node + pid: The process id of the node + node_state: The state of the node (INIT, RUNNING, SUCCEEDED, FAILED) + master_endpoint: The master endpoint for the rendezvous store, if known + rank: The rank of the node, if known + local_id: The local_id of the node, if defined in dynamic_rendezvous.py + error_trace: Error stack trace, if this is an error event. + """ + + name: str + run_id: str + message: str + hostname: str + pid: int + node_state: NodeState + master_endpoint: str = "" + rank: Optional[int] = None + local_id: Optional[int] = None + error_trace: str = "" + + def __str__(self): + return self.serialize() + + @staticmethod + def deserialize(data: Union[str, "RdzvEvent"]) -> "RdzvEvent": + if isinstance(data, RdzvEvent): + return data + if isinstance(data, str): + data_dict = json.loads(data) + data_dict["node_state"] = NodeState[data_dict["node_state"]] # type: ignore[possibly-undefined] + return RdzvEvent(**data_dict) + + def serialize(self) -> str: + return json.dumps(asdict(self)) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..2a7c16e3fd808db47d257158cedb4aad185d41e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from typing import Dict + + +_log_handlers: Dict[str, logging.Handler] = { + "console": logging.StreamHandler(), + "dynamic_rendezvous": logging.NullHandler(), + "null": logging.NullHandler(), +} + + +def get_logging_handler(destination: str = "null") -> logging.Handler: + global _log_handlers + return _log_handlers[destination] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..767abcc1d60b233049f082afc35d047cee1ca742 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py @@ -0,0 +1,163 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +"""Metrics API. + +**Overview**: + +The metrics API in torchelastic is used to publish telemetry metrics. +It is designed to be used by torchelastic's internal modules to +publish metrics for the end user with the goal of increasing visibility +and helping with debugging. However you may use the same API in your +jobs to publish metrics to the same metrics ``sink``. + +A ``metric`` can be thought of as timeseries data +and is uniquely identified by the string-valued tuple +``(metric_group, metric_name)``. + +torchelastic makes no assumptions about what a ``metric_group`` is +and what relationship it has with ``metric_name``. It is totally up +to the user to use these two fields to uniquely identify a metric. + +.. note:: The metric group ``torchelastic`` is reserved by torchelastic for + platform level metrics that it produces. + For instance torchelastic may output the latency (in milliseconds) + of a re-rendezvous operation from the agent as + ``(torchelastic, agent.rendezvous.duration.ms)`` + +A sensible way to use metric groups is to map them to a stage or module +in your job. You may also encode certain high level properties +the job such as the region or stage (dev vs prod). + +**Publish Metrics**: + +Using torchelastic's metrics API is similar to using python's logging +framework. You first have to configure a metrics handler before +trying to add metric data. + +The example below measures the latency for the ``calculate()`` function. + +:: + + import time + import torch.distributed.elastic.metrics as metrics + + # makes all metrics other than the one from "my_module" to go /dev/null + metrics.configure(metrics.NullMetricsHandler()) + metrics.configure(metrics.ConsoleMetricsHandler(), "my_module") + + def my_method(): + start = time.time() + calculate() + end = time.time() + metrics.put_metric("calculate_latency", int(end-start), "my_module") + +You may also use the torch.distributed.elastic.metrics.prof` decorator +to conveniently and succinctly profile functions + +:: + + # -- in module examples.foobar -- + + import torch.distributed.elastic.metrics as metrics + + metrics.configure(metrics.ConsoleMetricsHandler(), "foobar") + metrics.configure(metrics.ConsoleMetricsHandler(), "Bar") + + @metrics.prof + def foo(): + pass + + class Bar(): + + @metrics.prof + def baz(): + pass + +``@metrics.prof`` will publish the following metrics +:: + + .success - 1 if the function finished successfully + .failure - 1 if the function threw an exception + .duration.ms - function duration in milliseconds + +**Configuring Metrics Handler**: + +`torch.distributed.elastic.metrics.MetricHandler` is responsible for emitting +the added metric values to a particular destination. Metric groups can be +configured with different metric handlers. + +By default torchelastic emits all metrics to ``/dev/null``. +By adding the following configuration metrics, +``torchelastic`` and ``my_app`` metric groups will be printed out to +console. + +:: + + import torch.distributed.elastic.metrics as metrics + + metrics.configure(metrics.ConsoleMetricHandler(), group = "torchelastic") + metrics.configure(metrics.ConsoleMetricHandler(), group = "my_app") + +**Writing a Custom Metric Handler**: + +If you want your metrics to be emitted to a custom location, implement +the `torch.distributed.elastic.metrics.MetricHandler` interface +and configure your job to use your custom metric handler. 
+ +Below is a toy example that prints the metrics to ``stdout`` + +:: + + import torch.distributed.elastic.metrics as metrics + + class StdoutMetricHandler(metrics.MetricHandler): + def emit(self, metric_data): + ts = metric_data.timestamp + group = metric_data.group_name + name = metric_data.name + value = metric_data.value + print(f"[{ts}][{group}]: {name}={value}") + + metrics.configure(StdoutMetricHandler(), group="my_app") + +Now all metrics in the group ``my_app`` will be printed to stdout as: + +:: + + [1574213883.4182858][my_app]: my_metric= + [1574213940.5237644][my_app]: my_metric= + +""" + +from typing import Optional + +from .api import ( # noqa: F401 + ConsoleMetricHandler, + MetricData, + MetricHandler, + MetricsConfig, + NullMetricHandler, + configure, + get_elapsed_time_ms, + getStream, + prof, + profile, + publish_metric, + put_metric, +) + + +def initialize_metrics(cfg: Optional[MetricsConfig] = None): + pass + + +try: + from torch.distributed.elastic.metrics.static_init import * # type: ignore[import] # noqa: F401 F403 +except ModuleNotFoundError: + pass diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..981b6ab54b39496bc0e5552673af2b72fa13a800 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e975493862a0e527537450c9bb1cdbcdea1c1cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py new file mode 100644 index 0000000000000000000000000000000000000000..1499943c78d24d0fdaac31526318c3067743c79c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import abc +import time +import warnings +from collections import namedtuple +from functools import wraps +from typing import Dict, Optional + +__all__ = ['MetricsConfig', 'MetricHandler', 'ConsoleMetricHandler', 'NullMetricHandler', 'MetricStream', + 'configure', 'getStream', 'prof', 'profile', 'put_metric', 'publish_metric', 'get_elapsed_time_ms', + 'MetricData'] + +MetricData = namedtuple("MetricData", ["timestamp", "group_name", "name", "value"]) + + +class MetricsConfig: + __slots__ = ["params"] + + def __init__(self, params: Optional[Dict[str, str]] = None): + self.params = params + if self.params is None: + self.params = {} + + +class MetricHandler(abc.ABC): + @abc.abstractmethod + def emit(self, metric_data: MetricData): + pass + + +class ConsoleMetricHandler(MetricHandler): + def emit(self, metric_data: MetricData): + print( + f"[{metric_data.timestamp}][{metric_data.group_name}]: {metric_data.name}={metric_data.value}" + ) + + +class NullMetricHandler(MetricHandler): + def emit(self, metric_data: MetricData): + pass + + +class MetricStream: + def __init__(self, group_name: str, handler: MetricHandler): + self.group_name = group_name + self.handler = handler + + def add_value(self, metric_name: str, metric_value: int): + self.handler.emit( + MetricData(time.time(), self.group_name, metric_name, metric_value) + ) + + +_metrics_map: Dict[str, MetricHandler] = {} +_default_metrics_handler: MetricHandler = NullMetricHandler() + + +# pyre-fixme[9]: group has type `str`; used as `None`. +def configure(handler: MetricHandler, group: Optional[str] = None): + if group is None: + global _default_metrics_handler + # pyre-fixme[9]: _default_metrics_handler has type `NullMetricHandler`; used + # as `MetricHandler`. + _default_metrics_handler = handler + else: + _metrics_map[group] = handler + + +def getStream(group: str): + if group in _metrics_map: + handler = _metrics_map[group] + else: + handler = _default_metrics_handler + return MetricStream(group, handler) + + +def _get_metric_name(fn): + qualname = fn.__qualname__ + split = qualname.split(".") + if len(split) == 1: + module = fn.__module__ + if module: + return module.split(".")[-1] + "." + split[0] + else: + return split[0] + else: + return qualname + + +def prof(fn=None, group: str = "torchelastic"): + r""" + @profile decorator publishes duration.ms, count, success, failure metrics for the function that it decorates. + + The metric name defaults to the qualified name (``class_name.def_name``) of the function. + If the function does not belong to a class, it uses the leaf module name instead. + + Usage + + :: + + @metrics.prof + def x(): + pass + + @metrics.prof(group="agent") + def y(): + pass + """ + + def wrap(f): + @wraps(f) + def wrapper(*args, **kwargs): + key = _get_metric_name(f) + try: + start = time.time() + result = f(*args, **kwargs) + put_metric(f"{key}.success", 1, group) + except Exception: + put_metric(f"{key}.failure", 1, group) + raise + finally: + put_metric(f"{key}.duration.ms", get_elapsed_time_ms(start), group) # type: ignore[possibly-undefined] + return result + + return wrapper + + if fn: + return wrap(fn) + else: + return wrap + + +def profile(group=None): + """ + @profile decorator adds latency and success/failure metrics to any given function. 
+ + Usage + + :: + + @metrics.profile("my_metric_group") + def some_function(): + """ + warnings.warn("Deprecated, use @prof instead", DeprecationWarning) + + def wrap(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + start_time = time.time() + result = func(*args, **kwargs) + publish_metric(group, f"{func.__name__}.success", 1) + except Exception: + publish_metric(group, f"{func.__name__}.failure", 1) + raise + finally: + publish_metric( + group, + f"{func.__name__}.duration.ms", + get_elapsed_time_ms(start_time), # type: ignore[possibly-undefined] + ) + return result + + return wrapper + + return wrap + + +def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchelastic"): + """ + Publish a metric data point. + + Usage + + :: + + put_metric("metric_name", 1) + put_metric("metric_name", 1, "metric_group_name") + """ + getStream(metric_group).add_value(metric_name, metric_value) + + +def publish_metric(metric_group: str, metric_name: str, metric_value: int): + warnings.warn( + "Deprecated, use put_metric(metric_group)(metric_name, metric_value) instead" + ) + metric_stream = getStream(metric_group) + metric_stream.add_value(metric_name, metric_value) + + +def get_elapsed_time_ms(start_time_in_seconds: float): + """Return the elapsed time in millis from the given start time.""" + end_time = time.time() + return int((end_time - start_time_in_seconds) * 1000) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e6a55406f0334a7199d5fc4a653ddc7f744910 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Library that launches and manages ``n`` copies of worker subprocesses either specified by a function or a binary. + +For functions, it uses ``torch.multiprocessing`` (and therefore python +``multiprocessing``) to spawn/fork worker processes. For binaries it uses python +``subprocessing.Popen`` to create worker processes. + + +Usage 1: Launching two trainers as a function + +:: + + from torch.distributed.elastic.multiprocessing import Std, start_processes + + def trainer(a, b, c): + pass # train + + + # runs two trainers + # LOCAL_RANK=0 trainer(1,2,3) + # LOCAL_RANK=1 trainer(4,5,6) + ctx = start_processes( + name="trainer", + entrypoint=trainer, + args={0: (1,2,3), 1: (4,5,6)}, + envs={0: {"LOCAL_RANK": 0}, 1: {"LOCAL_RANK": 1}}, + log_dir="/tmp/foobar", + redirects=Std.ALL, # write all worker stdout/stderr to a log file + tee={0: Std.ERR}, # tee only local rank 0's stderr to console + ) + + # waits for all copies of trainer to finish + ctx.wait() + +Usage 2: Launching 2 echo workers as a binary + +:: + + # same as invoking + # echo hello + # echo world > stdout.log + ctx = start_processes( + name="echo" + entrypoint="echo", + log_dir="/tmp/foobar", + args={0: "hello", 1: "world"}, + redirects={1: Std.OUT}, + ) + +Just like ``torch.multiprocessing``, the return value of the function +:func:`start_processes` is a process context (:class:`api.PContext`). 
If a function +was launched, a :class:`api.MultiprocessContext` is returned and if a binary +was launched a :class:`api.SubprocessContext` is returned. Both are specific +implementations of the parent :class:`api.PContext` class. +""" + +import os +from typing import Callable, Dict, Optional, Tuple, Union, Set + +from torch.distributed.elastic.multiprocessing.api import ( # noqa: F401 + _validate_full_rank, + DefaultLogsSpecs, + LogsDest, + LogsSpecs, + MultiprocessContext, + PContext, + ProcessFailure, + RunProcsResult, + SignalException, + Std, + SubprocessContext, + to_map, +) +from torch.distributed.elastic.utils.logging import get_logger + +__all__ = [ + "start_processes", + "MultiprocessContext", + "PContext", + "ProcessFailure", + "RunProcsResult", + "SignalException", + "Std", + "LogsDest", + "LogsSpecs", + "DefaultLogsSpecs", + "SubprocessContext", + "to_map", +] + +log = get_logger(__name__) + + +def start_processes( + name: str, + entrypoint: Union[Callable, str], + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + start_method: str = "spawn", +) -> PContext: + """ + Start ``n`` copies of ``entrypoint`` processes with the provided options. + + ``entrypoint`` is either a ``Callable`` (function) or a ``str`` (binary). + The number of copies is determined by the number of entries for ``args`` and + ``envs`` arguments, which need to have the same key set. + + ``args`` and ``env`` parameters are the arguments and environment variables + to pass down to the entrypoint mapped by the replica index (local rank). + All local ranks must be accounted for. + That is, the keyset should be ``{0,1,...,(nprocs-1)}``. + + .. note:: When the ``entrypoint`` is a binary (``str``), ``args`` can only be strings. + If any other type is given, then it is casted to a string representation + (e.g. ``str(arg1)``). Furthermore, a binary failure will only write + an ``error.json`` error file if the main function is annotated with + ``torch.distributed.elastic.multiprocessing.errors.record``. For function launches, + this is done by default and there is no need to manually annotate + with the ``@record`` annotation. + + ``redirects`` and ``tee`` are bitmasks specifying which std stream(s) to redirect + to a log file in the ``log_dir``. Valid mask values are defined in ``Std``. + To redirect/tee only certain local ranks, pass ``redirects`` as a map with the key as + the local rank to specify the redirect behavior for. + Any missing local ranks will default to ``Std.NONE``. + + ``tee`` acts like the unix "tee" command in that it redirects + prints to console. + To avoid worker stdout/stderr from printing to console, use the ``redirects`` parameter. + + For each process, the ``log_dir`` will contain: + + #. ``{local_rank}/error.json``: if the process failed, a file with the error info + #. ``{local_rank}/stdout.json``: if ``redirect & STDOUT == STDOUT`` + #. ``{local_rank}/stderr.json``: if ``redirect & STDERR == STDERR`` + + .. note:: It is expected that the ``log_dir`` exists, is empty, and is a directory. 
+ + Example: + :: + + log_dir = "/tmp/test" + + # ok; two copies of foo: foo("bar0"), foo("bar1") + start_processes( + name="trainer", + entrypoint=foo, + args:{0:("bar0",), 1:("bar1",), + envs:{0:{}, 1:{}}, + log_dir=log_dir + ) + + # invalid; envs missing for local rank 1 + start_processes( + name="trainer", + entrypoint=foo, + args:{0:("bar0",), 1:("bar1",), + envs:{0:{}}, + log_dir=log_dir + ) + + # ok; two copies of /usr/bin/touch: touch file1, touch file2 + start_processes( + name="trainer", + entrypoint="/usr/bin/touch", + args:{0:("file1",), 1:("file2",), + envs:{0:{}, 1:{}}, + log_dir=log_dir + ) + + # caution; arguments casted to string, runs: + # echo "1" "2" "3" and echo "[1, 2, 3]" + start_processes( + name="trainer", + entrypoint="/usr/bin/echo", + args:{0:(1,2,3), 1:([1,2,3],), + envs:{0:{}, 1:{}}, + log_dir=log_dir + ) + + Args: + name: a human readable short name that describes what the processes are + (used as header when tee'ing stdout/stderr outputs) + entrypoint: either a ``Callable`` (function) or ``cmd`` (binary) + args: arguments to each replica + envs: env vars to each replica + log_dir: directory used to write log files + start_method: multiprocessing start method (spawn, fork, forkserver) + ignored for binaries + redirects: which std streams to redirect to a log file + tee: which std streams to redirect + print to console + local_ranks_filter: which ranks' logs to print to console + + """ + + nprocs = len(args) + _validate_full_rank(args, nprocs, "args") + _validate_full_rank(envs, nprocs, "envs") + + context: PContext + if isinstance(entrypoint, str): + context = SubprocessContext( + name=name, + entrypoint=entrypoint, + args=args, + envs=envs, + logs_specs=logs_specs, + log_line_prefixes=log_line_prefixes, + ) + else: + context = MultiprocessContext( + name=name, + entrypoint=entrypoint, + args=args, + envs=envs, + log_line_prefixes=log_line_prefixes, + start_method=start_method, + logs_specs=logs_specs, + ) + + try: + context.start() + return context + except Exception: + context.close() + raise diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..388f7e473f27eb8cb32b08f58cabdd0dbb0ca96c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f78379bffc073f1e1a559428a8bb185d1d7e3828 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a5b8535a2c24702ea6da0c5f796d0eef3d460ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35d2924aa04996e817430afb1e4fa62b6cd8cbb1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py new file mode 100644 index 0000000000000000000000000000000000000000..9a6158e760f0e5cfa36dfc7dc303b9ec788241a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py @@ -0,0 +1,873 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import abc +import logging +import os +import re +import shutil +import signal +import subprocess +import sys +import tempfile +import time +from contextlib import nullcontext +from dataclasses import dataclass, field +from enum import IntFlag +from multiprocessing import synchronize +from types import FrameType +from typing import Any, Callable, Dict, Optional, Set, Tuple, Union +from abc import ABC, abstractmethod + +import torch.multiprocessing as mp +from torch.distributed.elastic.multiprocessing.errors import ProcessFailure, record +from torch.distributed.elastic.multiprocessing.redirects import ( + redirect_stderr, + redirect_stdout, +) + +from torch.distributed.elastic.multiprocessing.subprocess_handler import SubprocessHandler, get_subprocess_handler +from torch.distributed.elastic.multiprocessing.tail_log import TailLog + +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" + + +log = logging.getLogger(__name__) + +__all__ = [ + "DefaultLogsSpecs", + "SignalException", + "Std", + "to_map", + "RunProcsResult", + "PContext", + "get_std_cm", + "MultiprocessContext", + "SubprocessContext", +] + +class SignalException(Exception): + """ + Exception is raised inside the torchelastic agent process by the termination handler + if the death signal got received by the process. + """ + + def __init__(self, msg: str, sigval: signal.Signals) -> None: + super().__init__(msg) + self.sigval = sigval + + +def _terminate_process_handler(signum: int, frame: Optional[FrameType]) -> None: + """Termination handler that raises exceptions on the main process. + + When the process receives death signal(SIGTERM, SIGINT), this termination handler will + be invoked. It raises the ``SignalException`` exception that should be processed by the + user code. Python does not terminate process after the termination handler is finished, + so the exception should not be silently ignored, otherwise the process will never + be terminated. + """ + sigval = signal.Signals(signum) + raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval) + + +def _get_kill_signal() -> signal.Signals: + """Get the kill signal. 
SIGKILL for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGKILL + + +def _get_default_signal() -> signal.Signals: + """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGTERM + + +def _validate_full_rank(d: Dict[int, Any], nprocs: int, what: str): + actual_keys = set(d.keys()) + expected_keys = set(range(nprocs)) + + if actual_keys != expected_keys: + raise RuntimeError( + f"{what}, local rank mapping mismatch," + f" expected: {expected_keys}, actual: {actual_keys}" + ) + + +_MAPPING_REGEX = r"^(\d:[0123],)*(\d:[0123])$" +_VALUE_REGEX = r"^[0123]$" + + +class Std(IntFlag): + NONE = 0 + OUT = 1 + ERR = 2 + ALL = OUT | ERR + + @classmethod + def from_str(cls, vm: str) -> Union["Std", Dict[int, "Std"]]: + """ + Example: + :: + + from_str("0") -> Std.NONE + from_str("1") -> Std.OUT + from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR} + + Any other input raises an exception + """ + + def to_std(v: str) -> Std: # type: ignore[return] + s = Std(int(v)) + if s in Std: + return s + # return None -> should NEVER reach here since we regex check input + + if re.match(_VALUE_REGEX, vm): # vm is a number (e.g. 0) + return to_std(vm) + elif re.match(_MAPPING_REGEX, vm): # vm is a mapping (e.g. 0:1,1:2) + d: Dict[int, Std] = {} + for m in vm.split(","): + i, v = m.split(":") + d[int(i)] = to_std(v) + return d + else: + raise ValueError( + f"{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>" + ) + + +def to_map( + val_or_map: Union[Std, Dict[int, Std]], local_world_size: int +) -> Dict[int, Std]: + """ + Certain APIs take redirect settings either as a single value (e.g. apply to all + local ranks) or as an explicit user-provided mapping. This method is a convenience + method that converts a value or mapping into a mapping. + + Example: + :: + + to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} + to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT} + to_map({0: Std.OUT, 1: Std.OUT}, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} + """ + if isinstance(val_or_map, Std): + return dict.fromkeys(range(local_world_size), val_or_map) + else: + map = {} + for i in range(local_world_size): + map[i] = val_or_map.get(i, Std.NONE) + return map + + +@dataclass +class LogsDest: + """ + For each log type, holds mapping of local rank ids to file paths. + """ + stdouts: Dict[int, str] = field(default_factory=dict) + stderrs: Dict[int, str] = field(default_factory=dict) + tee_stdouts: Dict[int, str] = field(default_factory=dict) + tee_stderrs: Dict[int, str] = field(default_factory=dict) + error_files: Dict[int, str] = field(default_factory=dict) + + +class LogsSpecs(ABC): + """ + Defines logs processing and redirection for each worker process. + + Args: + log_dir: + Base directory where logs will be written. + redirects: + Streams to redirect to files. Pass a single ``Std`` + enum to redirect for all workers, or a mapping keyed + by local_rank to selectively redirect. + tee: + Streams to duplicate to stdout/stderr. + Pass a single ``Std`` enum to duplicate streams for all workers, + or a mapping keyed by local_rank to selectively duplicate. 
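+
+    A configuration sketch using the default implementation (the directory
+    and rank values are illustrative)::
+
+        logs_specs = DefaultLogsSpecs(
+            log_dir="/tmp/my_job_logs",   # created if it does not exist
+            redirects=Std.ALL,            # write every rank's stdout/stderr to files
+            tee={0: Std.ERR},             # additionally stream rank 0's stderr to console
+        )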
+ """ + + def __init__( + self, + log_dir: Optional[str] = None, + redirects: Union[Std, Dict[int, Std]] = Std.NONE, + tee: Union[Std, Dict[int, Std]] = Std.NONE, + local_ranks_filter: Optional[Set[int]] = None, + ) -> None: + self._root_log_dir = log_dir + self._redirects = redirects + self._tee = tee + self._local_ranks_filter = local_ranks_filter + + @abstractmethod + def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest: + """ + Given the environment variables, builds destination of log files for each of the local ranks. + + Envs parameter contains env variables dict for each of the local ranks, where entries are defined in: + :func:`~torchelastic.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent._start_workers`. + """ + pass + + @property + @abstractmethod + def root_log_dir(self) -> str: + pass + +class DefaultLogsSpecs(LogsSpecs): + """ + Default LogsSpecs implementation: + + - `log_dir` will be created if it doesn't exist + - Generates nested folders for each attempt and rank. + """ + def __init__( + self, + log_dir: Optional[str] = None, + redirects: Union[Std, Dict[int, Std]] = Std.NONE, + tee: Union[Std, Dict[int, Std]] = Std.NONE, + local_ranks_filter: Optional[Set[int]] = None, + ) -> None: + if log_dir != os.devnull: + if not log_dir: + log_dir = tempfile.mkdtemp(prefix="torchelastic_") + elif not os.path.exists(log_dir): + os.makedirs(log_dir) + else: + if os.path.isfile(log_dir): + raise NotADirectoryError(f"log_dir: {log_dir} is a file") + super().__init__(log_dir, redirects, tee, local_ranks_filter) + # initialized only once + self._run_log_dir = None + + @property + def root_log_dir(self) -> str: + return str(self._root_log_dir) + + def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str): + base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_") + os.makedirs(base_log_dir, exist_ok=True) + dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir) + log.info("log directory set to: %s", dir) + return dir + + def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest: + """ + Uses following scheme to build log destination paths: + + - `//attempt_//stdout.log` + - `//attempt_//stderr.log` + - `//attempt_//error.json` + """ + nprocs = len(envs) + global_env = {} # use only to query properies that are not dependent on a rank + if nprocs > 0: + global_env = envs[0] + else: + log.warning("Empty envs map provided when defining logging destinations.") + # Keys are always defined, but values can be missing in unit tests + run_id = global_env.get("TORCHELASTIC_RUN_ID", "test_run_id") + restart_count = global_env.get("TORCHELASTIC_RESTART_COUNT", "0") + + attempt_log_dir: str = "" + if self._root_log_dir != os.devnull: + if not self._run_log_dir: + self._run_log_dir = self._make_log_dir(self._root_log_dir, run_id) + + attempt_log_dir = os.path.join(self._run_log_dir, f"attempt_{restart_count}") # type: ignore[call-overload] + shutil.rmtree(attempt_log_dir, ignore_errors=True) + os.makedirs(attempt_log_dir) + + if self._root_log_dir == os.devnull: + attempt_log_dir = os.devnull + + # create subdirs for each local rank in the logs_dir + # logs_dir + # |- 0 + # |- error.json + # |- stdout.log + # |- stderr.log + # |- ... 
+ # |- (nprocs-1) + redirs = to_map(self._redirects, nprocs) + ts = to_map(self._tee, nprocs) + + # to tee stdout/stderr we first redirect into a file + # then tail -f stdout.log/stderr.log so add tee settings to redirects + for local_rank, tee_std in ts.items(): + redirect_std = redirs[local_rank] + redirs[local_rank] = redirect_std | tee_std + + SYS_STREAM = "" # special case to indicate to output to console + stdouts = dict.fromkeys(range(nprocs), SYS_STREAM) + stderrs = dict.fromkeys(range(nprocs), SYS_STREAM) + tee_stdouts: Dict[int, str] = {} + tee_stderrs: Dict[int, str] = {} + error_files = {} + + for local_rank in range(nprocs): + + if attempt_log_dir == os.devnull: + tee_stdouts[local_rank] = os.devnull + tee_stderrs[local_rank] = os.devnull + error_files[local_rank] = os.devnull + envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = "" + else: + clogdir = os.path.join(attempt_log_dir, str(local_rank)) + os.mkdir(clogdir) + + rd = redirs[local_rank] + if (rd & Std.OUT) == Std.OUT: + stdouts[local_rank] = os.path.join(clogdir, "stdout.log") + if (rd & Std.ERR) == Std.ERR: + stderrs[local_rank] = os.path.join(clogdir, "stderr.log") + + t = ts[local_rank] + if t & Std.OUT == Std.OUT: + tee_stdouts[local_rank] = stdouts[local_rank] + if t & Std.ERR == Std.ERR: + tee_stderrs[local_rank] = stderrs[local_rank] + + if self._local_ranks_filter and local_rank not in self._local_ranks_filter: + # If stream is tee'd, only write to file, but don't tail + if local_rank in tee_stdouts: + tee_stdouts.pop(local_rank, None) + if local_rank in tee_stderrs: + tee_stderrs.pop(local_rank, None) + + # If stream is not redirected, don't print + if stdouts[local_rank] == SYS_STREAM: + stdouts[local_rank] = os.devnull + if stderrs[local_rank] == SYS_STREAM: + stderrs[local_rank] = os.devnull + + error_file = os.path.join(clogdir, "error.json") + error_files[local_rank] = error_file + log.info("Setting worker%s reply file to: %s", local_rank, error_file) + envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = error_file + + return LogsDest(stdouts, stderrs, tee_stdouts, tee_stderrs, error_files) + + def __repr__(self) -> str: + return ( + f"DefaultLogsSpecs(root_log_dir={self._root_log_dir}, redirects={self._redirects}, " + f"tee={self._tee}, local_ranks_filter={self._local_ranks_filter})" + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DefaultLogsSpecs): + return False + + return ( + self._root_log_dir == other._root_log_dir + and self._redirects == other._redirects + and self._tee == other._tee + and self._local_ranks_filter == other._local_ranks_filter + ) + + +@dataclass +class RunProcsResult: + """ + Results of a completed run of processes started with ``start_processes()``. Returned by ``PContext``. + + Note the following: + + 1. All fields are mapped by local rank + 2. ``return_values`` - only populated for functions (not the binaries). + 3. ``stdouts`` - path to stdout.log (empty string if no redirect) + 4. ``stderrs`` - path to stderr.log (empty string if no redirect) + + """ + + return_values: Dict[int, Any] = field(default_factory=dict) + failures: Dict[int, ProcessFailure] = field(default_factory=dict) + stdouts: Dict[int, str] = field(default_factory=dict) + stderrs: Dict[int, str] = field(default_factory=dict) + + def is_failed(self) -> bool: + return len(self.failures) > 0 + + +class PContext(abc.ABC): + """ + The base class that standardizes operations over a set of processes that are launched via different mechanisms. 
+ + The name ``PContext`` is intentional to disambiguate with ``torch.multiprocessing.ProcessContext``. + + .. warning:: stdouts and stderrs should ALWAYS be a superset of + tee_stdouts and tee_stderrs (respectively) this is b/c + tee is implemented as a redirect + tail -f + """ + + def __init__( + self, + name: str, + entrypoint: Union[Callable, str], + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + + ): + self.name = name + # validate that all mappings have the same number of keys and + # all local ranks are accounted for + nprocs = len(args) + + # TODO log_line_prefixes can be exanded too + logs_dest = logs_specs.reify(envs) + + _validate_full_rank(logs_dest.stdouts, nprocs, "stdouts") + _validate_full_rank(logs_dest.stderrs, nprocs, "stderrs") + + self.entrypoint = entrypoint + self.args = args + self.envs = envs + self.stdouts = logs_dest.stdouts + self.stderrs = logs_dest.stderrs + self.error_files = logs_dest.error_files + self.nprocs = nprocs + + self._stdout_tail = TailLog(name, logs_dest.tee_stdouts, sys.stdout, log_line_prefixes) + self._stderr_tail = TailLog(name, logs_dest.tee_stderrs, sys.stderr, log_line_prefixes) + + def start(self) -> None: + """Start processes using parameters defined in the constructor.""" + signal.signal(signal.SIGTERM, _terminate_process_handler) + signal.signal(signal.SIGINT, _terminate_process_handler) + if not IS_WINDOWS: + signal.signal(signal.SIGHUP, _terminate_process_handler) + signal.signal(signal.SIGQUIT, _terminate_process_handler) + self._start() + self._stdout_tail.start() + self._stderr_tail.start() + + @abc.abstractmethod + def _start(self) -> None: + """Start processes using strategy defined in a particular context.""" + raise NotImplementedError() + + @abc.abstractmethod + def _poll(self) -> Optional[RunProcsResult]: + """ + Poll the run status of the processes running under this context. + This method follows an "all-or-nothing" policy and returns + a ``RunProcessResults`` object if either all processes complete + successfully or any process fails. Returns ``None`` if + all processes are still running. + """ + raise NotImplementedError() + + def wait(self, timeout: float = -1, period: float = 1) -> Optional[RunProcsResult]: + """ + Wait for the specified ``timeout`` seconds, polling every ``period`` seconds + for the processes to be done. Returns ``None`` if the processes are still running + on timeout expiry. Negative timeout values are interpreted as "wait-forever". + A timeout value of zero simply queries the status of the processes (e.g. equivalent + to a poll). + + ..note: Multiprocessing library registers SIGTERM and SIGINT signal handlers that raise + ``SignalException`` when the signals received. It is up to the consumer of the code + to properly handle the exception. It is important not to swallow the exception otherwise + the process would not terminate. Example of the typical workflow can be: + + .. code-block:: python + pc = start_processes(...) + try: + pc.wait(1) + .. do some other work + except SignalException as e: + pc.shutdown(e.sigval, timeout=30) + + If SIGTERM or SIGINT occurs, the code above will try to shutdown child processes by propagating + received signal. If child processes will not terminate in the timeout time, the process will send + the SIGKILL. 
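+
+        Building on the ``pc`` handle above, a simple poll-until-done sketch
+        (the timeout and period values are illustrative):
+
+        .. code-block:: python
+
+            result = None
+            while result is None:
+                result = pc.wait(timeout=60, period=5)
+            if result.is_failed():
+                pc.close()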
+ """ + if timeout == 0: + return self._poll() + + if timeout < 0: + timeout = sys.maxsize + + expiry = time.time() + timeout + while time.time() < expiry: + pr = self._poll() + if pr: + return pr + time.sleep(period) + + return None + + @abc.abstractmethod + def pids(self) -> Dict[int, int]: + """Return pids of processes mapped by their respective local_ranks.""" + raise NotImplementedError() + + @abc.abstractmethod + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + r""" + Terminates all processes managed by this context and cleans up any + meta resources (e.g. redirect, error_file files). + """ + raise NotImplementedError() + + def close( + self, death_sig: Optional[signal.Signals] = None, timeout: int = 30 + ) -> None: + r""" + Terminates all processes managed by this context and cleans up any + meta resources (e.g. redirect, error_file files). + + Args: + death_sig: Death signal to terminate processes. + timeout: Time to wait for processes to finish, if process is + still alive after this time, it will be terminated via SIGKILL. + """ + if not death_sig: + death_sig = _get_default_signal() + self._close(death_sig=death_sig, timeout=timeout) + if self._stdout_tail: + self._stdout_tail.stop() + if self._stderr_tail: + self._stderr_tail.stop() + + +def get_std_cm(std_rd: str, redirect_fn): + if IS_WINDOWS or IS_MACOS or not std_rd: + return nullcontext() + else: + return redirect_fn(std_rd) + + +def _wrap( + local_rank: int, + fn: Callable, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + stdout_redirects: Dict[int, str], # redirect file for stdout (to console if None) + stderr_redirects: Dict[int, str], # redirect file for stderr (to console if None) + ret_vals: Dict[int, mp.SimpleQueue], + queue_finished_reading_event: synchronize.Event, +) -> None: + # get the per-rank params up front so we fail fast if no mapping is found + args_ = args[local_rank] + env_ = envs[local_rank] + ret_val_ = ret_vals[local_rank] + + stdout_rd = stdout_redirects[local_rank] + stderr_rd = stderr_redirects[local_rank] + + stdout_cm = get_std_cm(stdout_rd, redirect_stdout) + stderr_cm = get_std_cm(stderr_rd, redirect_stderr) + + for k, v in env_.items(): + os.environ[k] = v + + with stdout_cm, stderr_cm: + ret = record(fn)(*args_) + ret_val_.put(ret) + queue_finished_reading_event.wait() + + +class MultiprocessContext(PContext): + """``PContext`` holding worker processes invoked as a function.""" + + def __init__( + self, + name: str, + entrypoint: Callable, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + start_method: str, + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + ): + super().__init__( + name, + entrypoint, + args, + envs, + logs_specs, + log_line_prefixes, + ) + + self.start_method = start_method + # each ret_val queue will always contain a single element. + self._ret_vals = { + local_rank: mp.get_context(self.start_method).SimpleQueue() + for local_rank in range(self.nprocs) + } + + # see comments in ``join()`` for what this is + self._return_values: Dict[int, Any] = {} + self._pc: Optional[mp.ProcessContext] = None + # Note: set method should ONLY be invoked for the use case when all processes finished + # successfully. If any process died on event.wait() calling set() method will deadlock. + self._worker_finished_event = mp.get_context(self.start_method).Event() + + def _start(self): + if self._pc: + raise ValueError( + "The process context already initialized." + " Most likely the start method got called twice." 
+ ) + self._pc = mp.start_processes( + fn=_wrap, + args=( + self.entrypoint, + self.args, + self.envs, + self.stdouts, + self.stderrs, + self._ret_vals, + self._worker_finished_event, + ), + nprocs=self.nprocs, + join=False, + daemon=False, + start_method=self.start_method, + ) + + def _is_done(self) -> bool: + return len(self._return_values) == self.nprocs + + def _poll(self) -> Optional[RunProcsResult]: + assert self._pc is not None # assertion for mypy type checker + + try: + # torch.mp.ProcessContext Throws an Exception if some/all of + # worker processes failed + # timeout < 0 checks worker status and return immediately + # Join will never return success since we use synchronize.Event to wait + # for all processes to finish. + self._pc.join(-1) + + # IMPORTANT: we use multiprocessing.Queue to carry worker return values + # back to the parent, the worker process will wait before terminating + # until all the buffered items are fed by the feeder thread to the underlying + # pipe. Hence to prevent deadlocks on large return values, + # we opportunistically try queue.get on each join call + # See: https://docs.python.org/2/library/multiprocessing.html#all-platforms + for local_rank in range(0, self.nprocs): + return_queue = self._ret_vals[local_rank] + if not return_queue.empty(): + # save the return values temporarily into a member var + self._return_values[local_rank] = return_queue.get() + + if self._is_done(): + # we should ALWAYS have ALL the return values when all the processes are done + self._worker_finished_event.set() + # Wait untill all processes are finished. At this point workers finished executing + # user function + self._pc.join() + _validate_full_rank( + self._return_values, self.nprocs, "return_value queue" + ) + self.close() + return RunProcsResult( + return_values=self._return_values, + stdouts=self.stdouts, + stderrs=self.stderrs, + ) + else: + return None + except (mp.ProcessRaisedException, mp.ProcessExitedException) as e: + failed_local_rank = e.error_index + + # entrypoint for MultiprocessContext will always be a Callable + fn_name = self.entrypoint.__qualname__ # type: ignore[union-attr] + failed_proc = self._pc.processes[failed_local_rank] + error_filepath = self.error_files[failed_local_rank] + + log.exception( + "failed (exitcode: %s)" + " local_rank: %s (pid: %s)" + " of fn: %s (start_method: %s)", + failed_proc.exitcode, + failed_local_rank, e.pid, + fn_name, self.start_method, + ) + + self.close() + return RunProcsResult( + failures={ + failed_local_rank: ProcessFailure( + local_rank=failed_local_rank, + pid=e.pid, + exitcode=failed_proc.exitcode, + error_file=error_filepath, + ) + }, + stdouts=self.stdouts, + stderrs=self.stderrs, + ) + + def pids(self) -> Dict[int, int]: + assert self._pc is not None # assertion for mypy type checking + return dict(enumerate(self._pc.pids())) + + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + if not self._pc: + return + for proc in self._pc.processes: + if proc.is_alive(): + log.warning("Closing process %s via signal %s", proc.pid, death_sig.name) + try: + os.kill(proc.pid, death_sig) + except ProcessLookupError: + # If the process exited because of some reason, + # `ProcessLookupError` will be raised, it is safe to ignore it. 
+ pass + end = time.monotonic() + timeout + for proc in self._pc.processes: + time_to_wait = end - time.monotonic() + if time_to_wait <= 0: + break + proc.join(time_to_wait) + for proc in self._pc.processes: + if proc.is_alive(): + log.warning( + "Unable to shutdown process %s via %s, forcefully exiting via %s", + proc.pid, death_sig, _get_kill_signal() + ) + try: + os.kill(proc.pid, _get_kill_signal()) + except ProcessLookupError: + # If the process exited because of some reason, + # `ProcessLookupError` will be raised, it is safe to ignore it. + pass + proc.join() + +class SubprocessContext(PContext): + """``PContext`` holding worker processes invoked as a binary.""" + + def __init__( + self, + name: str, + entrypoint: str, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + + ): + super().__init__( + name, + entrypoint, + args, + envs, + logs_specs, + log_line_prefixes, + ) + + # state vector; _vdone[local_rank] -> is local_rank finished or not + self._running_local_ranks: Set[int] = set(range(self.nprocs)) + self._failures: Dict[int, ProcessFailure] = {} + self.subprocess_handlers: Dict[int, SubprocessHandler] = {} + + def _start(self): + if self.subprocess_handlers: + raise ValueError( + "The subprocess handlers already initialized. Most likely the start method got called twice." + ) + self.subprocess_handlers = { + local_rank: get_subprocess_handler( + entrypoint=self.entrypoint, # type: ignore[arg-type] # entrypoint is always a str + args=self.args[local_rank], + env=self.envs[local_rank], + stdout=self.stdouts[local_rank], + stderr=self.stderrs[local_rank], + local_rank_id=local_rank, + ) + for local_rank in range(self.nprocs) + } + + def _poll(self) -> Optional[RunProcsResult]: + done_local_ranks = set() + for local_rank in self._running_local_ranks: + handler = self.subprocess_handlers[local_rank] + exitcode = handler.proc.poll() + if exitcode is not None: + done_local_ranks.add(local_rank) + if exitcode != 0: # failed or signaled + self._failures[local_rank] = ProcessFailure( + local_rank=local_rank, + pid=handler.proc.pid, + exitcode=exitcode, + error_file=self.error_files[local_rank], + ) + # else: --> succeeded; nothing to do + + self._running_local_ranks.difference_update(done_local_ranks) + + # if ALL procs are finished or ANY have failed + if not self._running_local_ranks or self._failures: + self.close() # terminate all running procs + result = RunProcsResult( + failures=self._failures, + stdouts=self.stdouts, + stderrs=self.stderrs, + ) + if result.is_failed(): + first_failure = min(result.failures.values(), key=lambda f: f.timestamp) + log.error( + "failed (exitcode: %s)" + " local_rank: %s (pid: %s)" + " of binary: %s", + first_failure.exitcode, first_failure.local_rank, first_failure.pid, self.entrypoint + ) + else: + # Populate return with dummy values. 
This provides consistency with MultiprocessingHandler + result.return_values = dict.fromkeys(range(self.nprocs)) + + return result + else: # there are no failures and procs still running + return None + + def pids(self) -> Dict[int, int]: + return { + local_rank: sh.proc.pid + for local_rank, sh in self.subprocess_handlers.items() + } + + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + if not self.subprocess_handlers: + return + for handler in self.subprocess_handlers.values(): + if handler.proc.poll() is None: + log.warning( + "Sending process %s closing signal %s", handler.proc.pid, death_sig.name + ) + handler.close(death_sig=death_sig) + end = time.monotonic() + timeout + for handler in self.subprocess_handlers.values(): + time_to_wait = end - time.monotonic() + if time_to_wait <= 0: + break + try: + handler.proc.wait(time_to_wait) + except subprocess.TimeoutExpired: + # Ignore the timeout expired exception, since + # the child process will be forcefully terminated via SIGKILL + pass + for handler in self.subprocess_handlers.values(): + if handler.proc.poll() is None: + log.warning( + "Unable to shutdown process %s via %s, forcefully exiting via %s", + handler.proc.pid, death_sig, _get_kill_signal() + ) + handler.close(death_sig=_get_kill_signal()) + handler.proc.wait() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9a9df36146b553cde0010c58e28da0f3f0cc7d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Each host in a distributed PyTorch job runs with a single TorchElastic agent, +and multiple workers (as children processes of the TorchElastic agent). +Since the workers are user-provided (your PyTorch script/job), TorchElastic +has a way to propagate errors on the trainers through the agent and up to the +scheduler, which ultimately informs the end-user about the state of the job +and applies any retry policies. + +TorchElastic categorizes errors into 3 categories: + ++----------------+----------------+--------------------------------------------------------------+ +| Category | Sub-Category | Description | ++================+================+==============================================================+ +| User Error | Input Error | invalid inputs to TorchElastic APIs (e.g. min > max nodes) | +| +----------------+--------------------------------------------------------------+ +| | Worker Failure | any failures on the worker child process | ++----------------+----------------+--------------------------------------------------------------+ +| Platform Error | n/a | failures caused by the agent | ++----------------+----------------+--------------------------------------------------------------+ +| Infra Error | n/a | failures outside the domain of the agent and workers | +| | | (e.g. 
host failures) | ++----------------+----------------+--------------------------------------------------------------+ + +All errors other than "Worker Failure" are either raised canonically from the +agent process or implicitly or explicitly crash the agent process. So the +standard language (python) provided exception handling strategies apply. + +Worker Failures are special because the exception/failure originates on a different +process from the agent so the error needs to be propagated inter-process +(e.g. the agent cannot simply ``try-catch`` an exception raised on the worker process). + +TorchElastic agents use :func:`torch.distributed.elastic.multiprocessing.start_processes` +to launch the workers which has a simple file based inter-process error propagation +built-in. + +Any function or binary entrypoint decorated with :func:`record` +will write uncaught exceptions (with the trace information) to a file specified by the +environment variable ``TORCHELASTIC_ERROR_FILE``. The parent process (e.g. agent) +sets this env var on each child it launches, then aggregates the error files for all +children, and propagates the one with the **smallest** timestamp (e.g. the **first** error). +""" + +import json +import os +import signal +import socket +import time +import warnings +from dataclasses import dataclass, field +from datetime import datetime +from functools import wraps +from string import Template +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar + +from torch.distributed.elastic.utils.logging import get_logger + +from .error_handler import ErrorHandler # noqa: F401 +from .handlers import get_error_handler # noqa: F401 + +__all__ = ["ProcessFailure", "ChildFailedError", "record", "ErrorHandler", "get_error_handler"] + +log = get_logger(__name__) + + +JSON = Dict + +_EMPTY_ERROR_DATA = {"message": ""} +_NOT_AVAILABLE = "" + +T = TypeVar("T") + + +@dataclass +class ProcessFailure: + """ + Represent the failed process result. When the worker process fails, it may record failure root cause into the file. + + Tries to read the failure timestamp from the provided ``error_file``, + if the ``error_file`` does not exist, the timestamp is the current + timestamp (seconds since epoch). + + The ``message`` field is a concise explanation of the failure. If + the error file exists then the message is obtained from the error file. + Otherwise one is generated based on the failure signature. + + .. note:: It is assumed that the ``error_file`` is written by + ``torch.distributed.elastic.multiprocessing.errors.error_handler.ErrorHandler``. + Otherwise the behavior is undefined. 
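A minimal, self-contained sketch of the reply-file convention described above, using hypothetical per-rank file paths: each failure is serialized as JSON with a ``message``/``extraInfo`` payload, and the parent treats the entry with the smallest timestamp as the root cause:

::

    import json
    import os
    import tempfile
    import time


    def write_error_file(path, exc, ts):
        # Shape mirrors the reply files written by the error handler: the
        # timestamp is stored as a string under ``extraInfo``.
        data = {
            "message": {
                "message": f"{type(exc).__name__}: {exc}",
                "extraInfo": {
                    "py_callstack": "Traceback (most recent call last): ...",
                    "timestamp": str(ts),
                },
            }
        }
        with open(path, "w") as fp:
            json.dump(data, fp)


    def first_failure(error_files):
        # Pick the failure with the smallest timestamp as the root cause.
        def ts(item):
            with open(item[1]) as fp:
                return int(json.load(fp)["message"]["extraInfo"]["timestamp"])

        rank, path = min(error_files.items(), key=ts)
        with open(path) as fp:
            return rank, json.load(fp)


    if __name__ == "__main__":
        tmp = tempfile.mkdtemp()
        files = {rank: os.path.join(tmp, f"{rank}_error.json") for rank in (0, 1)}
        now = int(time.time())
        write_error_file(files[0], RuntimeError("later failure"), now + 5)
        write_error_file(files[1], ValueError("first failure"), now)
        rank, data = first_failure(files)
        print(rank, data["message"]["message"])  # -> 1 ValueError: first failure
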
+ + """ + + local_rank: int + pid: int + exitcode: int + error_file: str + error_file_data: JSON = field(init=False) + message: str = field(init=False) + timestamp: int = field(init=False) + + def __post_init__(self): + self.error_file_data = _EMPTY_ERROR_DATA + if os.path.isfile(self.error_file): + try: + with open(self.error_file) as fp: + self.error_file_data = json.load(fp) + log.debug( + "User process failed with error data: %s", json.dumps(self.error_file_data, indent=2) + ) + self.message, self.timestamp = self._get_error_data( + self.error_file_data + ) + except Exception: + log.exception("Failed to parse reply file: %s", self.error_file) + raise + else: + self._set_no_reply_file() + + # make up an informative message if not already present + if not self.message: + # signals typically do not generate an error file message + if self.exitcode < 0: + self.message = ( + f"Signal {-self.exitcode} ({self.signal_name()})" + f" received by PID {self.pid}" + ) + else: + self.message = "To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html" + + def _get_error_data(self, error_file_data: Dict[str, Any]) -> Tuple[str, int]: + message = error_file_data["message"] + if isinstance(message, str): + timestamp = int(error_file_data.get("timestamp", 0)) + else: + timestamp = int(message["extraInfo"]["timestamp"]) + return (message, timestamp) + + def _set_no_reply_file(self): + self.error_file = _NOT_AVAILABLE + self.error_file_data = _EMPTY_ERROR_DATA + self.message = "" + self.timestamp = int(time.time()) + + def signal_name(self) -> str: + if self.exitcode < 0: + # We don't want to kill the parent process trying to find the signal name. + # if the signal doesn't map to a known name, use not available. + try: + return signal.Signals(-self.exitcode).name + except Exception: + return _NOT_AVAILABLE + else: + return _NOT_AVAILABLE + + def timestamp_isoformat(self): + """Return timestamp in ISO format (YYYY-MM-DD_HH:MM:SS).""" + return datetime.fromtimestamp(self.timestamp).isoformat(sep="_") + + +GlobalRank = int + +_FAILURE_FORMAT_TEMPLATE = """[${idx}]: + time : ${time} + host : ${hostname} + rank : ${rank} (local_rank: ${local_rank}) + exitcode : ${exitcode} (pid: ${pid}) + error_file: ${error_file} + traceback : ${message}""" + +# extra new lines before and after are intentional +_MSG_FORMAT_TEMPLATE = """ +${boarder} +${title} +${section} +Failures: +${other_failures} +${section} +Root Cause (first observed failure): +${root_failure} +${boarder}""" + + +class ChildFailedError(Exception): + """ + Special exception type that can be raised from a function annotated with the + ``@record`` decorator to have the child process' (root exception) propagate + up the stack as-is (e.g. without being wrapped in the parent's traceback). + + Useful in cases where the parent is a simple nanny process + and the child (worker) processes are actually doing meaningful compute. + In this case, errors typically occur on the child process as the parent + is not doing anything non-trivial, and child errors should be propagated + to the scheduler for accurate root cause diagnostics. + + .. note:: The propagation relies on error files rather than exception handling to + support both function and binary launches. + + Example: + :: + + # process tree on a host (container) + 0: scheduler-init-process: + |- 1: torchelastic_agent: + |- 2: trainer_0 (ok) + |- 3: trainer_1 (fail) -> error.json + |- ... + |- n+2: trainer_n (ok) + |- n+3: other processes + |- ... 
+ + In the example above, trainer 1's failure (written into error.json) is + the root cause and should be reported to the scheduler's init process. + The torchelastic agent raises a ``ChildFailedError("trainer", {1: "trainer_1/error.json"})`` + upon detecting trainer 1's failure which would propagate the contents + of trainer 1's error file to the scheduler's init process. + """ + + def __init__(self, name: str, failures: Dict[GlobalRank, ProcessFailure]): + self.name = name + self.failures = failures + assert ( + self.failures + ) # does not make sense to create a ChildFaileError with no failures + super().__init__(self.format_msg()) + + def get_first_failure(self) -> Tuple[GlobalRank, ProcessFailure]: + rank = min(self.failures.keys(), key=lambda r: self.failures[r].timestamp) + return rank, self.failures[rank] + + def format_msg(self, boarder_delim="=", section_delim="-"): + title = f"{self.name} FAILED" + root_rank, root_failure = self.get_first_failure() + + root_failure_fmt: str = "" + other_failures_fmt: List[str] = [] + width = len(title) + for idx, (rank, failure) in enumerate(self.failures.items()): + fmt, w = self._format_failure(idx, rank, failure) + width = max(width, w) + if rank == root_rank: + root_failure_fmt = fmt + else: + other_failures_fmt.append(fmt) + + # upper boundary on width + width = min(width, 60) + + return Template(_MSG_FORMAT_TEMPLATE).substitute( + boarder=boarder_delim * width, + title=title, + section=section_delim * width, + root_failure=root_failure_fmt, + other_failures="\n".join(other_failures_fmt or [" "]), + ) + + def _format_failure( + self, idx: int, rank: int, failure: ProcessFailure + ) -> Tuple[str, int]: + + # failure.message is either a str (when the failure does not generate a traceback - e.g. signals) + # or a dict (json) of the form + # {"message": $ERROR_MSG, "extraInfo": {"py_callstack": $TRACEBACK, timestamp: $TS}} + # so the display logic is: + # 1. if failure.message is not a dict (it is a str) just show it as is + # 2. else try to get the traceback (py_callstack) + # 3. if the traceback is not there, use the message + # 4. if the message is not there show + msg = failure.message + if isinstance(failure.message, dict): + msg = ( + failure.message.get("extraInfo", {}) + .get("py_callstack", failure.message.get("message", "")) + .replace("\n", "\n ") # to properly indent the traceback + ) + + fmt = Template(_FAILURE_FORMAT_TEMPLATE).substitute( + idx=idx, + time=failure.timestamp_isoformat(), + hostname=socket.getfqdn(), + rank=rank, + local_rank=failure.local_rank, + exitcode=failure.exitcode, + pid=failure.pid, + error_file=failure.error_file, + message=msg, + ) + width = 0 + for line in fmt.split("\n"): + width = max(width, len(line)) + return fmt, width + + +def record( + fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None +) -> Callable[..., T]: + """ + Syntactic sugar to record errors/exceptions that happened in the decorated + function using the provided ``error_handler``. + + Using this decorator is equivalent to: + + :: + + error_handler = get_error_handler() + error_handler.initialize() + try: + foobar() + except ChildFailedError as e: + _, failure = e.get_first_failure() + error_handler.dump_error_file(failure.error_file, failure.exitcode) + raise + except Exception as e: + error_handler.record(e) + raise + + .. important:: use this decorator once per process at the top level method, + typically this is the main method. 
+ + Example + + :: + + @record + def main(): + pass + + if __name__=="__main__": + main() + + """ + if not error_handler: + error_handler = get_error_handler() + + def wrap(f): + @wraps(f) + def wrapper(*args, **kwargs): + assert error_handler is not None # assertion for mypy type checker + error_handler.initialize() + try: + return f(*args, **kwargs) + except SystemExit as se: + # For run_path based entrypoints, SystemExit with code = 0 will never exit. + # Handling it here by returning a value: + if se.code == 0: + return None + else: + raise + except ChildFailedError as e: + rank, failure = e.get_first_failure() + if failure.error_file != _NOT_AVAILABLE: + error_handler.dump_error_file(failure.error_file, failure.exitcode) + else: + log.info( + ( + "local_rank %s FAILED with no error file." + " Decorate your entrypoint fn with @record for traceback info." + " See: https://pytorch.org/docs/stable/elastic/errors.html", + rank + ) + ) + raise + except Exception as e: + error_handler.record_exception(e) + raise + + return wrapper + + return wrap(fn) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b07aa966b2f9703a041372046841c7372802502f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc5a4685e9ba70967f40b60cda99b584bf44124d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73bdd5492162dba751f37efbd1d6b56ffe4bbd4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..481e9da23b4b1557d8bc7d1e990267664fee063a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+import faulthandler +import json +import logging +import os +import time +import traceback +import warnings +from typing import Any, Dict, Optional + +__all__ = ['ErrorHandler'] + +log = logging.getLogger(__name__) + + +class ErrorHandler: + """ + Write the provided exception object along with some other metadata about + the error in a structured way in JSON format to an error file specified by the + environment variable: ``TORCHELASTIC_ERROR_FILE``. If this environment + variable is not set, then simply logs the contents of what would have been + written to the error file. + + This handler may be subclassed to customize the handling of the error. + Subclasses should override ``initialize()`` and ``record_exception()``. + """ + + def _get_error_file_path(self) -> Optional[str]: + """ + Return the error file path. + + May return ``None`` to have the structured error be logged only. + """ + return os.environ.get("TORCHELASTIC_ERROR_FILE", None) + + def initialize(self) -> None: + """ + Call prior to running code that we wish to capture errors/exceptions. + + Typically registers signal/fault handlers. Users can override this + function to add custom initialization/registrations that aid in + propagation/information of errors/signals/exceptions/faults. + """ + try: + faulthandler.enable(all_threads=True) + except Exception as e: + warnings.warn(f"Unable to enable fault handler. {type(e).__name__}: {e}") + + def _write_error_file(self, file_path: str, error_msg: str) -> None: + """Write error message to the file.""" + try: + with open(file_path, "w") as fp: + fp.write(error_msg) + except Exception as e: + warnings.warn(f"Unable to write error to file. {type(e).__name__}: {e}") + + def record_exception(self, e: BaseException) -> None: + """ + Write a structured information about the exception into an error file in JSON format. + + If the error file cannot be determined, then logs the content + that would have been written to the error file. + """ + file = self._get_error_file_path() + if file: + data = { + "message": { + "message": f"{type(e).__name__}: {e}", + "extraInfo": { + "py_callstack": traceback.format_exc(), + "timestamp": str(int(time.time())), + }, + } + } + with open(file, "w") as fp: + json.dump(data, fp) + + def override_error_code_in_rootcause_data( + self, + rootcause_error_file: str, + rootcause_error: Dict[str, Any], + error_code: int = 0, + ): + """Modify the rootcause_error read from the file, to correctly set the exit code.""" + if "message" not in rootcause_error: + log.warning( + "child error file (%s) does not have field `message`. \n" + "cannot override error code: %s", + rootcause_error_file, error_code + ) + elif isinstance(rootcause_error["message"], str): + log.warning( + "child error file (%s) has a new message format. \n" + "skipping error code override", + rootcause_error_file + ) + else: + rootcause_error["message"]["errorCode"] = error_code + + def dump_error_file(self, rootcause_error_file: str, error_code: int = 0): + """Dump parent error file from child process's root cause error and error code.""" + with open(rootcause_error_file) as fp: + rootcause_error = json.load(fp) + # Override error code since the child process cannot capture the error code if it + # is terminated by signals like SIGSEGV. 
+ if error_code: + self.override_error_code_in_rootcause_data(rootcause_error_file, rootcause_error, error_code) + log.debug( + "child error file (%s) contents:\n" + "%s", + rootcause_error_file, json.dumps(rootcause_error, indent=2) + ) + + my_error_file = self._get_error_file_path() + if my_error_file: + # Guard against existing error files + # This can happen when the child is created using multiprocessing + # and the same env var (TORCHELASTIC_ERROR_FILE) is used on the + # parent and child to specify the error files (respectively) + # because the env vars on the child is set in the wrapper function + # and by default the child inherits the parent's env vars, if the child + # process receives a signal before the wrapper function kicks in + # and the signal handler writes to the error file, then the child + # will write to the parent's error file. In this case just log the + # original error file contents and overwrite the error file. + self._rm(my_error_file) + self._write_error_file(my_error_file, json.dumps(rootcause_error)) + log.info("dumped error file to parent's %s", my_error_file) + else: + log.error( + "no error file defined for parent, to copy child error file (%s)", rootcause_error_file + ) + + def _rm(self, my_error_file): + if os.path.isfile(my_error_file): + # Log the contents of the original file. + with open(my_error_file) as fp: + try: + original = json.dumps(json.load(fp), indent=2) + log.warning( + "%s already exists" + " and will be overwritten." + " Original contents:\n%s", + my_error_file, original + ) + except json.decoder.JSONDecodeError as err: + log.warning( + "%s already exists" + " and will be overwritten." + " Unable to load original contents:\n", + my_error_file + ) + os.remove(my_error_file) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..3071aef1711785602265a4dec81405b382444132 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# Multiprocessing error-reporting module + + +from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler + +__all__ = ['get_error_handler'] + +def get_error_handler(): + return ErrorHandler() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py new file mode 100644 index 0000000000000000000000000000000000000000..e632558193832b66515e1f8a0af8975cb85d9874 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py @@ -0,0 +1,102 @@ +# !/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
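Before the redirect helpers, a short sketch of exercising the handler above on its own: point ``TORCHELASTIC_ERROR_FILE`` at a scratch path, record an exception, and inspect the JSON that lands there. The temp-file location is only an illustration:

::

    import json
    import os
    import tempfile

    from torch.distributed.elastic.multiprocessing.errors import get_error_handler

    if __name__ == "__main__":
        error_file = os.path.join(tempfile.mkdtemp(), "error.json")
        os.environ["TORCHELASTIC_ERROR_FILE"] = error_file

        handler = get_error_handler()  # returns an ErrorHandler instance
        handler.initialize()           # enables faulthandler for all threads

        try:
            raise RuntimeError("boom")
        except RuntimeError as e:
            handler.record_exception(e)

        with open(error_file) as fp:
            data = json.load(fp)
        # The recorded payload carries the message plus a python call stack
        # and a unix timestamp under ``extraInfo``.
        print(data["message"]["message"])

This is the same handler that the ``@record`` decorator obtains via ``get_error_handler()`` before wrapping the entrypoint.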
+ +# Taken and modified from original source: +# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/ +import ctypes +import logging +import os +import sys +from contextlib import contextmanager +from functools import partial + +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" + + +logger = logging.getLogger(__name__) + + +def get_libc(): + if IS_WINDOWS or IS_MACOS: + logger.warning( + "NOTE: Redirects are currently not supported in Windows or MacOs." + ) + return None + else: + return ctypes.CDLL("libc.so.6") + + +libc = get_libc() + + +def _c_std(stream: str): + return ctypes.c_void_p.in_dll(libc, stream) + + +def _python_std(stream: str): + return {"stdout": sys.stdout, "stderr": sys.stderr}[stream] + + +_VALID_STD = {"stdout", "stderr"} + + +@contextmanager +def redirect(std: str, to_file: str): + """ + Redirect ``std`` (one of ``"stdout"`` or ``"stderr"``) to a file in the path specified by ``to_file``. + + This method redirects the underlying std file descriptor (not just python's ``sys.stdout|stderr``). + See usage for details. + + Directory of ``dst_filename`` is assumed to exist and the destination file + is overwritten if it already exists. + + .. note:: Due to buffering cross source writes are not guaranteed to + appear in wall-clock order. For instance in the example below + it is possible for the C-outputs to appear before the python + outputs in the log file. + + Usage: + + :: + + # syntactic-sugar for redirect("stdout", "tmp/stdout.log") + with redirect_stdout("/tmp/stdout.log"): + print("python stdouts are redirected") + libc = ctypes.CDLL("libc.so.6") + libc.printf(b"c stdouts are also redirected" + os.system("echo system stdouts are also redirected") + + print("stdout restored") + + """ + if std not in _VALID_STD: + raise ValueError( + f"unknown standard stream <{std}>, must be one of {_VALID_STD}" + ) + + c_std = _c_std(std) + python_std = _python_std(std) + std_fd = python_std.fileno() + + def _redirect(dst): + libc.fflush(c_std) + python_std.flush() + os.dup2(dst.fileno(), std_fd) + + with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode="w+b") as dst: + _redirect(dst) + try: + yield + finally: + _redirect(orig_std) + + +redirect_stdout = partial(redirect, "stdout") +redirect_stderr = partial(redirect, "stderr") diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4c335964c7322aac57a485c3d4ea160b5ab34dba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
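A runnable variant of the redirect usage sketched in the docstring above, assuming a Linux host where ``libc.so.6`` is available (the log-file path is arbitrary):

::

    import ctypes
    import os
    import tempfile

    from torch.distributed.elastic.multiprocessing.redirects import redirect_stdout

    if __name__ == "__main__":
        log_file = os.path.join(tempfile.mkdtemp(), "stdout.log")

        with redirect_stdout(log_file):
            # Python-level writes, C-level writes and child-process output all
            # land in the log file because the underlying fd 1 is duplicated.
            print("python stdout is redirected")
            libc = ctypes.CDLL("libc.so.6")
            libc.printf(b"c stdout is also redirected\n")
            os.system("echo subprocess stdout is also redirected")

        print("stdout restored")
        with open(log_file) as fp:
            print(fp.read())
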
+from torch.distributed.elastic.multiprocessing.subprocess_handler.handlers import ( + get_subprocess_handler, +) +from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import ( + SubprocessHandler, +) + +__all__ = ["SubprocessHandler", "get_subprocess_handler"] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..145abea01ad779c0956c80d28efa05c31eebf1dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3b41694bae073f7433064e1e1d182e606fb755e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1bb8b845c1753afa9148209891910c14f329461 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4477452a200edb881ae3573ff63db6c9f67e65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+from typing import Dict, Tuple + +from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import ( + SubprocessHandler, +) + +__all__ = ["get_subprocess_handler"] + + +def get_subprocess_handler( + entrypoint: str, + args: Tuple, + env: Dict[str, str], + stdout: str, + stderr: str, + local_rank_id: int, +): + return SubprocessHandler( + entrypoint=entrypoint, + args=args, + env=env, + stdout=stdout, + stderr=stderr, + local_rank_id=local_rank_id, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..7cacf986857500afb2e18f7af4f97cd3867300ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import os +import signal +import subprocess +import sys + +from typing import Any, Dict, Optional, Tuple + +__all__ = ["SubprocessHandler"] + +IS_WINDOWS = sys.platform == "win32" + + +def _get_default_signal() -> signal.Signals: + """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGTERM + + +class SubprocessHandler: + """ + Convenience wrapper around python's ``subprocess.Popen``. Keeps track of + meta-objects associated to the process (e.g. stdout and stderr redirect fds). + """ + + def __init__( + self, + entrypoint: str, + args: Tuple, + env: Dict[str, str], + stdout: str, + stderr: str, + local_rank_id: int, + ): + self._stdout = open(stdout, "w") if stdout else None + self._stderr = open(stderr, "w") if stderr else None + # inherit parent environment vars + env_vars = os.environ.copy() + env_vars.update(env) + + args_str = (entrypoint, *[str(e) for e in args]) + self.local_rank_id = local_rank_id + self.proc: subprocess.Popen = self._popen(args_str, env_vars) + + def _popen(self, args: Tuple, env: Dict[str, str]) -> subprocess.Popen: + kwargs: Dict[str, Any] = {} + if not IS_WINDOWS: + kwargs["start_new_session"] = True + return subprocess.Popen( + # pyre-fixme[6]: Expected `Union[typing.Sequence[Union[_PathLike[bytes], + # _PathLike[str], bytes, str]], bytes, str]` for 1st param but got + # `Tuple[str, *Tuple[Any, ...]]`. 
+ args=args, + env=env, + stdout=self._stdout, + stderr=self._stderr, + **kwargs, + ) + + def close(self, death_sig: Optional[signal.Signals] = None) -> None: + if not death_sig: + death_sig = _get_default_signal() + if IS_WINDOWS: + self.proc.send_signal(death_sig) + else: + os.killpg(self.proc.pid, death_sig) + if self._stdout: + self._stdout.close() + if self._stderr: + self._stderr.close() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py new file mode 100644 index 0000000000000000000000000000000000000000..f99c3bda4580b0c8053e9884607f63502bcb90ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +import os +import time +from concurrent.futures._base import Future +from concurrent.futures.thread import ThreadPoolExecutor +from threading import Event +from typing import Dict, List, Optional, TextIO + +__all__ = ["tail_logfile", "TailLog"] + +log = logging.getLogger(__name__) + + +def tail_logfile( + header: str, file: str, dst: TextIO, finished: Event, interval_sec: float +): + + while not os.path.exists(file): + if finished.is_set(): + return + time.sleep(interval_sec) + + with open(file, errors="replace") as fp: + while True: + line = fp.readline() + + if line: + dst.write(f"{header}{line}") + else: # reached EOF + if finished.is_set(): + # log line producer is finished + break + else: + # log line producer is still going + # wait for a bit before looping again + time.sleep(interval_sec) + + +class TailLog: + """ + Tail the given log files. + + The log files do not have to exist when the ``start()`` method is called. The tail-er will gracefully wait until + the log files are created by the producer and will tail the contents of the + log files until the ``stop()`` method is called. + + .. warning:: ``TailLog`` will wait indefinitely for the log file to be created! + + Each log file's line will be suffixed with a header of the form: ``[{name}{idx}]:``, + where the ``name`` is user-provided and ``idx`` is the index of the log file + in the ``log_files`` mapping. ``log_line_prefixes`` can be used to override the + header for each log file. + + Usage: + + :: + + log_files = {0: "/tmp/0_stdout.log", 1: "/tmp/1_stdout.log"} + tailer = TailLog("trainer", log_files, sys.stdout).start() + # actually run the trainers to produce 0_stdout.log and 1_stdout.log + run_trainers() + tailer.stop() + + # once run_trainers() start writing the ##_stdout.log files + # the tailer will print to sys.stdout: + # >>> [trainer0]:log_line1 + # >>> [trainer1]:log_line1 + # >>> [trainer0]:log_line2 + # >>> [trainer0]:log_line3 + # >>> [trainer1]:log_line2 + + .. note:: Due to buffering log lines between files may not necessarily + be printed out in order. You should configure your application's + logger to suffix each log line with a proper timestamp. 
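A short sketch of driving the ``SubprocessHandler`` wrapper above directly on a POSIX host, with ``/bin/sleep`` standing in for a real trainer binary and temp files standing in for its log destinations:

::

    import os
    import signal
    import tempfile

    from torch.distributed.elastic.multiprocessing.subprocess_handler import (
        get_subprocess_handler,
    )

    if __name__ == "__main__":
        tmp = tempfile.mkdtemp()

        handler = get_subprocess_handler(
            entrypoint="/bin/sleep",          # stand-in for a trainer binary
            args=("30",),
            env={"LOCAL_RANK": "0"},          # merged on top of os.environ
            stdout=os.path.join(tmp, "0_stdout.log"),
            stderr=os.path.join(tmp, "0_stderr.log"),
            local_rank_id=0,
        )
        print("spawned pid", handler.proc.pid)

        # Terminate the whole process group with SIGTERM and reap the child,
        # mirroring what SubprocessContext._close does for live handlers.
        handler.close(death_sig=signal.SIGTERM)
        print("exitcode", handler.proc.wait())

Running this prints the spawned pid and an exit code of ``-15`` (terminated by SIGTERM), which is the shape of exit code that ``ProcessFailure`` maps back to a signal name.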
+ + """ + + def __init__( + self, + name: str, + log_files: Dict[int, str], + dst: TextIO, + log_line_prefixes: Optional[Dict[int, str]] = None, + interval_sec: float = 0.1, + ): + n = len(log_files) + self._threadpool = None + if n > 0: + self._threadpool = ThreadPoolExecutor( + max_workers=n, + thread_name_prefix=f"{self.__class__.__qualname__}_{name}", + ) + + self._name = name + self._dst = dst + self._log_files = log_files + self._log_line_prefixes = log_line_prefixes + self._finished_events: Dict[int, Event] = { + local_rank: Event() for local_rank in log_files.keys() + } + self._futs: List[Future] = [] + self._interval_sec = interval_sec + self._stopped = False + + def start(self) -> "TailLog": + if not self._threadpool: + return self + + for local_rank, file in self._log_files.items(): + header = f"[{self._name}{local_rank}]:" + if self._log_line_prefixes and local_rank in self._log_line_prefixes: + header = self._log_line_prefixes[local_rank] + self._futs.append( + self._threadpool.submit( + tail_logfile, + header=header, + file=file, + dst=self._dst, + finished=self._finished_events[local_rank], + interval_sec=self._interval_sec, + ) + ) + return self + + def stop(self) -> None: + for finished in self._finished_events.values(): + finished.set() + + for local_rank, f in enumerate(self._futs): + try: + f.result() + except Exception as e: + log.error( + "error in log tailor for %s%s. %s: %s", + self._name, local_rank, + e.__class__.__qualname__, e, + ) + + if self._threadpool: + self._threadpool.shutdown(wait=True) + + self._stopped = True + + def stopped(self) -> bool: + return self._stopped diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..668849fa6d2078a9da89c5049b0706ac072984a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py @@ -0,0 +1,150 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +In the context of Torch Distributed Elastic we use the term *rendezvous* to +refer to a particular functionality that combines a **distributed +synchronization** primitive with **peer discovery**. + +It is used by Torch Distributed Elastic to gather participants of a training +job (i.e. nodes) such that they all agree on the same list of participants and +everyone's roles, as well as make a consistent collective decision on when +training can begin/resume. + +Torch Distributed Elastic rendezvous provides the following critical +functionalities: + +**Barrier**: + +Nodes performing rendezvous will all block until the rendezvous is considered +complete - this happens when at least ``min`` total number of nodes have joined +the rendezvous barrier (for the same job). This also implies the barrier is not +necessarily of fixed size. + +There's an additional small waiting time after reaching ``min`` number of +nodes - this is used to ensure the rendezvous is not completed "too quickly" +(which could potentially exclude additional nodes attempting to join at +approximately the same time). + +If ``max`` number of nodes is gathered at the barrier, the rendezvous is +completed immediately. 
+ +There's also an overall timeout which causes the rendezvous to fail if ``min`` +number of nodes is never reached - this is meant to be a simple fail-safe to +help release partially allocated job resources, in case there's a problem with +the resource manager, and is meant to be interpreted as non-retryable. + +**Exclusivity**: + +A simple distributed barrier would not be sufficient, as we also need to ensure +that only one group of nodes exists at any given time (for a given job). In +other words, new nodes (i.e. joining late) should not be able to form a parallel +independent group of workers for the same job. + +Torch Distributed Elastic rendezvous ensures that if a group of nodes has +already completed a rendezvous (and hence might already be training), then +additional "late" nodes attempting to rendezvous will only announce themselves +as waiting, and will have to wait until the (previously completed) existing +rendezvous is destroyed first. + +**Consistency**: + +When a rendezvous is completed, all its members will agree on the job membership +and everyone's role in it. This role is represented using an integer, called +rank, that is between between 0 and world size. + +Note that ranks are *not stable*, in the sense that the same node can be +assigned a different rank in the next (re-)rendezvous. + +**Fault-tolerance**: + +Torch Distributed Elastic rendezvous is designed to tolerate node failures +during the rendezvous process. Should a process crash (or lose network +connectivity, etc), between joining the rendezvous and it being completed, then +a re-rendezvous with remaining healthy nodes will happen automatically. + +A node can also fail *after* it has completed (or *has been observered* by other +nodes to have completed) the rendezvous - this scenario will be handled by the +Torch Distributed Elastic ``train_loop`` instead (where it will also trigger a +re-rendezvous). + +**Shared key-value store**: + +When the rendezvous is completed, a shared key-value store is created and +returned. This store implements a ``torch.distributed.Store`` API (see +`distributed communication docs +`__). + +This store is only shared by the members of the completed rendezvous. It +is intended to be used by Torch Distributed Elastic to exchange information +necessary to initialize job control and data-planes. + +**Waiting workers and rendezvous closing**: + +Torch Distributed Elastic rendezvous handler object provides additional +functionalities, which are technically not part of the rendezvous process: + +1. Querying how many workers arrived late at the barrier, who can participate in + *next* rendezvous. + +2. Setting the rendezvous *closed* to signal all nodes not to participate in + next rendezvous. + +**DynamicRendezvousHandler**: + +Torch Distributed Elastic comes with the :py:class:`.DynamicRendezvousHandler` +class that implements the rendezvous mechanism described above. It is a backend- +agnostic type that expects a particular :py:class:`.RendezvousBackend` instance +to be specified during construction. + +Torch distributed users can either implement their own backend type or use one +of the following implementations that come with PyTorch: + +- :py:class:`.C10dRendezvousBackend`: Uses a C10d store (by default + ``TCPStore``) as the rendezvous backend. The main advantage of using a C10d + store is that it requires no 3rd-party dependency (such as etcd) to establish + a rendezvous. +- :py:class:`.EtcdRendezvousBackend`: Supersedes the legacy + :py:class:`.EtcdRendezvousHandler` class. 
Passing an + :py:class:`.EtcdRendezvousBackend` instance to + :py:class:`.DynamicRendezvousHandler` is functionally equivalent to + instantiating an :py:class:`.EtcdRendezvousHandler`. + + :: + + store = TCPStore("localhost") + + backend = C10dRendezvousBackend(store, "my_run_id") + + rdzv_handler = DynamicRendezvousHandler.from_backend( + run_id="my_run_id", + store=store, + backend=backend, + min_nodes=2, + max_nodes=4 + ) +""" + +from .api import * # noqa: F403 +from .registry import _register_default_handlers + + +_register_default_handlers() + + +__all__ = [ + "RendezvousClosedError", + "RendezvousConnectionError", + "RendezvousError", + "RendezvousGracefulExitError", + "RendezvousHandler", + "RendezvousHandlerCreator", + "RendezvousHandlerRegistry", + "RendezvousParameters", + "RendezvousStateError", + "RendezvousTimeoutError", + "rendezvous_handler_registry", +] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cf18753e6e1b4ed42545024b0b7ed1cdd72b95d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baae4458bd56e0d943ad1f6cc55f521248da2fb6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cec54e992f7a7baf8da470b3e49a3e0ef1f465ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e47465abb89bb95fb489bc97780ba41b086ebfc0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..281454551c5c2ff7517d5936ed0b2d6e6f86b144 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e4cb69edb03bbe769fa780ec474db6d80dbd1f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4e122dcbb627439bc2e5454b025bff1b1d7f5ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a33516ff6647f32b00140d31466337042d0ee17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fec72db28e8ce7e711bafe40ed24bfbab1e88ba2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/static_tcp_rendezvous.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/static_tcp_rendezvous.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef8ba7bbcacef8e380f442e8013e5f18fab4bd84 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/static_tcp_rendezvous.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c265dca854a8f4a5ab26cf650704d491f851946a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/api.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/api.py new file mode 100644 index 0000000000000000000000000000000000000000..9e096c4ee409028cb7bb0ad6796ab6295d35ef22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/api.py @@ -0,0 +1,277 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +from abc import ABC, abstractmethod +from typing import Any, Callable, Dict, Optional, Tuple + +from torch.distributed import Store + + +class RendezvousError(Exception): + """Represents the base type for rendezvous errors.""" + + +class RendezvousClosedError(RendezvousError): + """Raised when a rendezvous is closed.""" + + +class RendezvousTimeoutError(RendezvousError): + """Raised when a rendezvous did not complete on time.""" + + +class RendezvousConnectionError(RendezvousError): + """Raised when the connection to a rendezvous backend has failed.""" + + +class RendezvousStateError(RendezvousError): + """Raised when the state of a rendezvous is corrupt.""" + +class RendezvousGracefulExitError(RendezvousError): + """Raised when node wasn't not included in rendezvous and gracefully exits. + + Exception is a mechanism to exit the stack, however does not mean a failure. + """ + +class RendezvousHandler(ABC): + """Main rendezvous interface. + + Note: + Distributed Torch users normally **do not** need to implement their own + ``RendezvousHandler``. An implementation based on C10d Store is already + provided, and is recommended for most users. + """ + + @abstractmethod + def get_backend(self) -> str: + """Return the name of the rendezvous backend.""" + + @abstractmethod + def next_rendezvous( + self, + ) -> Tuple[Store, int, int]: + """Main entry-point into the rendezvous barrier. + + Blocks until the rendezvous is complete and the current process is + included in the formed worker group, or a timeout occurs, or the + rendezvous was marked closed. + + Returns: + A tuple of :py:class:`torch.distributed.Store`, ``rank``, and + ``world size``. + + Raises: + RendezvousClosedError: + The rendezvous is closed. + RendezvousConnectionError: + The connection to the rendezvous backend has failed. + RendezvousStateError: + The rendezvous state is corrupt. + RendezvousTimeoutError: + The rendezvous did not complete on time. + """ + + @abstractmethod + def is_closed(self) -> bool: + """Check whether the rendezvous has been closed. + + A closed rendezvous means all future attempts to re-rendezvous within + same job will fail. + + ``is_closed()`` and :py:meth:`set_closed` have semantics of eventual + propagation and should not be used for synchronization. The intention is + that if at least one node decides the job is finished, it will close the + rendezvous, and other nodes will soon observe this and stop running as + well. + """ + + @abstractmethod + def set_closed(self): + """Mark the rendezvous as closed.""" + + @abstractmethod + def num_nodes_waiting(self) -> int: + """Return the number of nodes who arrived late at the rendezvous + barrier, hence were not included in the current worker group. + + Callers should periodically call this method to check whether new + nodes are waiting to join the job and if so admit them by calling + :py:meth:`next_rendezvous()` (re-rendezvous). + """ + + @abstractmethod + def get_run_id(self) -> str: + """Return the run id of the rendezvous. + + The run id is a user-defined id that uniquely identifies an instance of + a distributed application. It typically maps to a job id and is used to + allow nodes to join the correct distributed application. + """ + + @abstractmethod + def shutdown(self) -> bool: + """Close all resources that were open for the rendezvous. + + Example:: + + rdzv_handler = ... 
+ try: + store, rank, world_size = rdzv_handler.next_rendezvous() + finally: + rdzv_handler.shutdown() + """ + + +class RendezvousParameters: + """Hold the parameters to construct a :py:class:`RendezvousHandler`. + + Args: + backend: + The name of the backend to use to handle the rendezvous. + endpoint: + The endpoint of the rendezvous, usually in form [:]. + run_id: + The id of the rendezvous. + min_nodes: + The minimum number of nodes to admit to the rendezvous. + max_nodes: + The maximum number of nodes to admit to the rendezvous. + local_addr: + The address of the local node. + **kwargs: + Additional parameters for the specified backend. + """ + + def __init__( + self, + backend: str, + endpoint: str, + run_id: str, + min_nodes: int, + max_nodes: int, + local_addr: Optional[str] = None, + **kwargs, + ): + if not backend: + raise ValueError("The rendezvous backend name must be a non-empty string.") + + if min_nodes < 1: + raise ValueError( + f"The minimum number of rendezvous nodes ({min_nodes}) must be greater than zero." + ) + if max_nodes < min_nodes: + raise ValueError( + f"The maximum number of rendezvous nodes ({max_nodes}) must be greater than or " + f"equal to the minimum number of rendezvous nodes ({min_nodes})." + ) + + self.backend = backend + self.endpoint = endpoint + self.run_id = run_id + self.min_nodes = min_nodes + self.max_nodes = max_nodes + self.config = kwargs + self.local_addr = local_addr + + def get(self, key: str, default: Any = None) -> Any: + """Return the value for ``key`` if ``key`` exists, else ``default``.""" + return self.config.get(key, default) + + def get_as_bool(self, key: str, default: Optional[bool] = None) -> Optional[bool]: + """Return the value for ``key`` as a ``bool``.""" + value = self.get(key, default) + if value is None or isinstance(value, bool): + return value + if isinstance(value, int): + if value == 1: + return True + if value == 0: + return False + elif isinstance(value, str): + if value.lower() in ["1", "true", "t", "yes", "y"]: + return True + if value.lower() in ["0", "false", "f", "no", "n"]: + return False + raise ValueError( + f"The rendezvous configuration option '{key}' does not represent a valid boolean value." + ) + + def get_as_int(self, key: str, default: Optional[int] = None) -> Optional[int]: + """Return the value for ``key`` as an ``int``.""" + value = self.get(key, default) + if value is None: + return value + try: + return int(value) + except ValueError as e: + raise ValueError( + f"The rendezvous configuration option '{key}' does not represent a valid integer " + "value." + ) from e + + +RendezvousHandlerCreator = Callable[[RendezvousParameters], RendezvousHandler] + + +class RendezvousHandlerRegistry: + """Represent a registry of :py:class:`RendezvousHandler` backends.""" + + _registry: Dict[str, RendezvousHandlerCreator] + + def __init__(self) -> None: + self._registry = {} + + def register(self, backend: str, creator: RendezvousHandlerCreator) -> None: + """Register a new rendezvous backend. + + Args: + backend: + The name of the backend. + creator: + The callback to invoke to construct the + :py:class:`RendezvousHandler`. 
+ """ + if not backend: + raise ValueError("The rendezvous backend name must be a non-empty string.") + + current_creator: Optional[RendezvousHandlerCreator] + try: + current_creator = self._registry[backend] + except KeyError: + current_creator = None + + if current_creator is not None and current_creator != creator: + raise ValueError( + f"The rendezvous backend '{backend}' cannot be registered with '{creator}' as it " + f"is already registered with '{current_creator}'." + ) + + self._registry[backend] = creator + + def create_handler(self, params: RendezvousParameters) -> RendezvousHandler: + """Create a new :py:class:`RendezvousHandler`.""" + try: + creator = self._registry[params.backend] + except KeyError as e: + raise ValueError( + f"The rendezvous backend '{params.backend}' is not registered. Did you forget " + f"to call `{self.register.__name__}`?" + ) from e + + handler = creator(params) + + # Do some sanity check. + if handler.get_backend() != params.backend: + raise RuntimeError( + f"The rendezvous backend '{handler.get_backend()}' does not match the requested " + f"backend '{params.backend}'." + ) + + return handler + + +# The default global registry instance used by launcher scripts to instantiate +# rendezvous handlers. +rendezvous_handler_registry = RendezvousHandlerRegistry() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..144e691ae08caa1c2f6bf28ef3faf3e4bdbe8f77 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py @@ -0,0 +1,269 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import binascii +import logging +import os +import tempfile +from base64 import b64decode, b64encode +from datetime import timedelta +from typing import Any, Optional, Tuple, cast + +from torch.distributed import FileStore, Store, TCPStore +from torch.distributed.elastic.events import ( + NodeState, + construct_and_record_rdzv_event, +) + +from .api import ( + RendezvousConnectionError, + RendezvousError, + RendezvousParameters, + RendezvousStateError, +) +from .dynamic_rendezvous import RendezvousBackend, Token +from .utils import _matches_machine_hostname, parse_rendezvous_endpoint + +log = logging.getLogger(__name__) + + +class C10dRendezvousBackend(RendezvousBackend): + """Represents a C10d-backed rendezvous backend. + + Args: + store: + The :py:class:`torch.distributed.Store` instance to use to + communicate with the C10d store. + run_id: + The run id of the rendezvous. + """ + + # See the explanation in the __init__ method. + _NULL_SENTINEL = "Y2FuaW1hZGFt" + + _store: Store + _key: str + + def __init__(self, store: Store, run_id: str) -> None: + if not run_id: + raise ValueError("The run id must be a non-empty string.") + + self._store = store + + self._key = "torch.rendezvous." + run_id + + # The read operation of a store blocks the caller until the specified + # key becomes available. This behavior makes it tricky to use a store + # as a regular key-value dictionary. + # + # As a workaround we initially set a sentinel value as the rendezvous + # state. Whenever this value gets returned we treat it as a None. 
+ self._call_store("compare_set", self._key, "", self._NULL_SENTINEL) + + @property + def name(self) -> str: + """See base class.""" + return "c10d" + + def get_state(self) -> Optional[Tuple[bytes, Token]]: + """See base class.""" + base64_state: bytes = self._call_store("get", self._key) + + return self._decode_state(base64_state) + + def set_state( + self, state: bytes, token: Optional[Token] = None + ) -> Optional[Tuple[bytes, Token, bool]]: + """See base class.""" + base64_state_str: str = b64encode(state).decode() + + if token: + # Shortcut if we know for sure that the token is not valid. + if not isinstance(token, bytes): + result = self.get_state() + if result is not None: + tmp = *result, False + # Python 3.6 does not support tuple unpacking in return + # statements. + return tmp + return None + + token = token.decode() + else: + token = self._NULL_SENTINEL + + base64_state: bytes = self._call_store("compare_set", self._key, token, base64_state_str) + + state_token_pair = self._decode_state(base64_state) + if state_token_pair is None: + return None + + new_state, new_token = state_token_pair + + # C10d Store's compare_set method does not offer an easy way to find out + # whether our write attempt was successful. As a brute-force solution we + # perform a bitwise comparison of our local state and the remote state. + return new_state, new_token, new_state == state + + def _call_store(self, store_op: str, *args, **kwargs) -> Any: + try: + return getattr(self._store, store_op)(*args, **kwargs) + except (ValueError, RuntimeError, TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to the C10d store has failed. See inner exception for details." + ) from exc + + def _decode_state(self, base64_state: bytes) -> Optional[Tuple[bytes, Token]]: + if base64_state == self._NULL_SENTINEL.encode(): + return None + + try: + state = b64decode(base64_state) + except binascii.Error as exc: + raise RendezvousStateError( + "The state object is corrupt. See inner exception for details." + ) from exc + + return state, base64_state + + +def _create_tcp_store(params: RendezvousParameters) -> TCPStore: + host, port = parse_rendezvous_endpoint(params.endpoint, default_port=29400) + + cfg_is_host = params.get_as_bool("is_host") + # If the user has explicitly specified whether our process should host the + # the store, respect it. + if cfg_is_host is not None: + is_host = cfg_is_host + # Otherwise try to determine whether we are the host based on our hostname + # and IP address. + else: + is_host = _matches_machine_hostname(host) + + use_libuv = params.get_as_bool("use_libuv", False) + + # The timeout + read_timeout = cast(int, params.get_as_int("read_timeout", 60)) + if read_timeout <= 0: + raise ValueError("The read timeout must be a positive integer.") + + # In specific cases we attempt to instantiate the store twice. For details + # see the explanation in the except clause below. + for is_server in [is_host, False]: + try: + store = TCPStore( + host, + port, + is_master=is_server, + timeout=timedelta(seconds=read_timeout), + use_libuv=use_libuv, + ) + + if is_server: + msg = f"Process {os.getpid()} hosts the TCP store for the C10d rendezvous backend." 
+ construct_and_record_rdzv_event( + run_id=params.run_id, message=msg, node_state=NodeState.INIT + ) + log.info(msg) + + break + except (ValueError, RuntimeError, TimeoutError) as exc: + # If we heuristically inferred the value of is_host as True and our + # first attempt to instantiate the TCP store has failed, try it one + # more time with is_host set to False. As an edge case there can be + # more than one process that is part of the same rendezvous on this + # machine and only one of them will eventually host the store. + + if not is_server or cfg_is_host is not None: + raise RendezvousConnectionError( + "The connection to the C10d store has failed. See inner exception for details." + ) from exc + + return store # type: ignore[possibly-undefined] + + +def _create_file_store(params: RendezvousParameters) -> FileStore: + # If a user specifies an endpoint, we treat it as a path to a file. + if params.endpoint: + path = params.endpoint + else: + try: + # The temporary file is readable and writable only by the user of + # this process. + _, path = tempfile.mkstemp() + except OSError as exc: + raise RendezvousError( + "The file creation for C10d store has failed. See inner exception for details." + ) from exc + + try: + store = FileStore(path) + except (ValueError, RuntimeError) as exc: + raise RendezvousConnectionError( + "The connection to the C10d store has failed. See inner exception for details." + ) from exc + + return store + + +def create_backend(params: RendezvousParameters) -> Tuple[C10dRendezvousBackend, Store]: + """Create a new :py:class:`C10dRendezvousBackend` from the specified parameters. + + +--------------+-----------------------------------------------------------+ + | Parameter | Description | + +==============+===========================================================+ + | store_type | The type of the C10d store. The currently supported types | + | | are "tcp" and "file" which correspond to | + | | :py:class:`torch.distributed.TCPStore` and | + | | :py:class:`torch.distributed.FileStore`, respectively. | + | | Defaults to "tcp". | + +--------------+-----------------------------------------------------------+ + | read_timeout | The read timeout, in seconds, for store operations. | + | | Defaults to 60 seconds. | + | | | + | | Note this only applies to | + | | :py:class:`torch.distributed.TCPStore`. It is not relevant| + | | to :py:class:`torch.distributed.FileStore` which does not | + | | take in timeout as a parameter. | + +--------------+-----------------------------------------------------------+ + | is_host | A boolean value indicating whether this backend instance | + | | will host the C10d store. If not specified it will be | + | | inferred heuristically by matching the hostname or the IP | + | | address of this machine against the specified rendezvous | + | | endpoint. Defaults to ``None``. | + | | | + | | Note that this configuration option only applies to | + | | :py:class:`torch.distributed.TCPStore`. In normal | + | | circumstances you can safely skip it; the only time when | + | | it is needed is if its value cannot be correctly | + | | determined (e.g. the rendezvous endpoint has a CNAME as | + | | the hostname or does not match the FQDN of the machine). | + +--------------+-----------------------------------------------------------+ + """ + # As of today we only support TCPStore and FileStore. Other store types do + # not have the required functionality (e.g. compare_set) yet. 
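+    # Illustrative sketch of how this factory is typically driven; the values
+    # below are placeholders that mirror the parameter table above:
+    #
+    #   params = RendezvousParameters(
+    #       backend="c10d",
+    #       endpoint="localhost:29400",
+    #       run_id="job-42",
+    #       min_nodes=1,
+    #       max_nodes=4,
+    #       store_type="tcp",
+    #       read_timeout=60,
+    #   )
+    #   backend, store = create_backend(params)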
+ store_type = params.get("store_type", "tcp").strip().lower() + store: Store + + try: + if store_type == "file": + store = _create_file_store(params) + elif store_type == "tcp": + store = _create_tcp_store(params) + else: + raise ValueError("Invalid store type given. Currently only supports file and tcp.") + + backend = C10dRendezvousBackend(store, params.run_id) + + except Exception as e: + construct_and_record_rdzv_event( + message=f"{type(e).__name__}: {str(e)}", + run_id=params.run_id, + node_state=NodeState.FAILED, + ) + raise + + return backend, store diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py new file mode 100644 index 0000000000000000000000000000000000000000..7a2ce80ed903905c419f83c8e36ccd118457ad5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py @@ -0,0 +1,1343 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import inspect +import logging +import os +import pickle +import socket +import threading +import time +import weakref +from abc import ABC, abstractmethod +from dataclasses import dataclass +from datetime import datetime, timedelta +from enum import Enum +from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple + +from torch.distributed import PrefixStore, Store +from torch.distributed.elastic.events import construct_and_record_rdzv_event, NodeState + +from .api import ( + RendezvousClosedError, + RendezvousError, + RendezvousGracefulExitError, + RendezvousHandler, + RendezvousParameters, + RendezvousStateError, + RendezvousTimeoutError, +) +from .utils import _delay, _PeriodicTimer + +__all__ = ['RendezvousBackend', 'RendezvousTimeout', 'RendezvousSettings', 'DynamicRendezvousHandler', 'create_handler'] + +log = logging.getLogger(__name__) + + +def get_method_name(depth=2): + if len(inspect.stack()) > depth: + return inspect.stack()[depth].function + return "no_method_name" + + +Token = Any +"""Represent an opaque fencing token used by the rendezvous backend.""" + +class RendezvousBackend(ABC): + """Represent a backend that holds the rendezvous state.""" + + @property + @abstractmethod + def name(self) -> str: + """Get the name of the backend.""" + + @abstractmethod + def get_state(self) -> Optional[Tuple[bytes, Token]]: + """Get the rendezvous state. + + Returns: + A tuple of the encoded rendezvous state and its fencing token or + ``None`` if no state is found in the backend. + + Raises: + RendezvousConnectionError: + The connection to the backend has failed. + RendezvousStateError: + The rendezvous state is corrupt. + """ + + @abstractmethod + def set_state( + self, state: bytes, token: Optional[Token] = None + ) -> Optional[Tuple[bytes, Token, bool]]: + """Set the rendezvous state. + + The new rendezvous state is set conditionally: + + - If the specified ``token`` matches the fencing token stored in the + backend, the state will be updated. The new state will be returned + to the caller along with its fencing token. + - If the specified ``token`` does not match the fencing token stored + in the backend, the state won't be updated; instead the existing + state along with its fencing token will be returned to the caller. 
+ - If the specified ``token`` is ``None``, the new state will be set + only if there is no existing state in the backend. Either the new + state or the existing state along with its fencing token will be + returned to the caller. + + Args: + state: + The encoded rendezvous state. + token: + An optional fencing token that was retrieved by a previous call + to :py:meth:`get_state` or ``set_state()``. + + Returns: + A tuple of the serialized rendezvous state, its fencing token, and + a boolean value indicating whether our set attempt succeeded. + + Raises: + RendezvousConnectionError: + The connection to the backend has failed. + RendezvousStateError: + The rendezvous state is corrupt. + """ + + +class RendezvousTimeout: + """Hold the timeout configuration of a rendezvous. + + Args: + join: + The time within which the rendezvous is expected to complete. + last_call: + An additional wait amount before completing the rendezvous once the + rendezvous has the minimum number of required participants. + close: + The time within which the rendezvous is expected to close after a + call to :py:meth:`RendezvousHandler.set_closed` or + :py:meth:`RendezvousHandler.shutdown`. + keep_alive: + The time within which a keep-alive heartbeat is expected to + complete. + """ + + _ZERO = timedelta(0) + + _DEFAULT_TIMEOUTS = { + "join": timedelta(seconds=600), + "last_call": timedelta(seconds=30), + "close": timedelta(seconds=30), + "heartbeat": timedelta(seconds=5), + } + + _join: timedelta + _last_call: timedelta + _close: timedelta + _heartbeat: timedelta + + def __init__( + self, + join: Optional[timedelta] = None, + last_call: Optional[timedelta] = None, + close: Optional[timedelta] = None, + heartbeat: Optional[timedelta] = None, + ) -> None: + self._set_timeouts(join=join, last_call=last_call, close=close, heartbeat=heartbeat) + + @property + def join(self) -> timedelta: + """Get the join timeout.""" + return self._join + + @property + def last_call(self) -> timedelta: + """Get the last call timeout.""" + return self._last_call + + @property + def close(self) -> timedelta: + """Get the close timeout.""" + return self._close + + @property + def heartbeat(self) -> timedelta: + """Get the keep-alive heartbeat timeout.""" + return self._heartbeat + + def _set_timeouts(self, **timeouts: Optional[timedelta]): + for name, timeout in timeouts.items(): + if timeout is None: + timeout = self._DEFAULT_TIMEOUTS[name] + if timeout <= self._ZERO: + raise ValueError(f"The {name} timeout ({timeout}) must be positive.") + setattr(self, "_" + name, timeout) + + +@dataclass(repr=False, eq=False, frozen=True) +class RendezvousSettings: + """Hold the settings of the rendezvous. + + Attributes: + run_id: + The run id of the rendezvous. + min_nodes: + The minimum number of nodes to admit to the rendezvous. + max_nodes: + The maximum number of nodes to admit to the rendezvous. + timeout: + The timeout configuration of the rendezvous. + keep_alive_interval: + The amount of time a node waits before sending a heartbeat to keep + it alive in the rendezvous. + keep_alive_max_attempt: + The maximum number of failed heartbeat attempts after which a node + is considered dead. + """ + + run_id: str + min_nodes: int + max_nodes: int + timeout: RendezvousTimeout + keep_alive_interval: timedelta + keep_alive_max_attempt: int + + +@dataclass(eq=True, order=True, frozen=True) +class _NodeDesc: + """Describe a node in the rendezvous. + + Attributes: + addr: + The FQDN of the node or user specified local node address. 
+ pid: + The id of the process in which the rendezvous handler runs. + local_id: + A process-wide unique id. + """ + + addr: str + pid: int + local_id: int + + def __repr__(self) -> str: + return f"{self.addr}_{self.pid}_{self.local_id}" + + +class _NodeDescGenerator: + """Generate node descriptors. + + A node descriptor is a combination of an FQDN, a process id, and an auto- + incremented integer that uniquely identifies a node in the rendezvous. + """ + + _lock: threading.Lock + _local_id: int + + def __init__(self) -> None: + self._lock = threading.Lock() + + # An integer that is incremented with each call to generate(). + self._local_id = 0 + + def generate(self, local_addr: Optional[str] = None) -> _NodeDesc: + # This method can be called by multiple threads concurrently; therefore, + # we must increment the integer atomically. + with self._lock: + local_id = self._local_id + + self._local_id += 1 + + return _NodeDesc(local_addr or socket.getfqdn(), os.getpid(), local_id) + + +class _RendezvousState: + """Hold the state of a rendezvous. + + Attributes: + round: + The current round of the rendezvous. + complete: + A boolean value indicating whether the current round of the + rendezvous is complete. + deadline: + The time at which the current round of the rendezvous will be + considered complete if it is still waiting for nodes to join. + closed: + A boolean value indicating whether the rendezvous is closed. + participants: + A dictionary of the participants and their corresponding ranks. + wait_list: + A set of nodes that are waiting to participate in the next round of + the rendezvous. + redundancy_list: + A set of nodes that are redundant in the current round and can join + the next rendezvous without triggering re-rendezvous. + last_heartbeats: + A dictionary containing each node's last heartbeat time. + """ + + round: int + complete: bool + deadline: Optional[datetime] + closed: bool + participants: Dict[_NodeDesc, int] + wait_list: Set[_NodeDesc] + redundancy_list: Set[_NodeDesc] + last_heartbeats: Dict[_NodeDesc, datetime] + + def __init__(self) -> None: + self.round = 0 + self.complete = False + self.deadline = None + self.closed = False + self.participants = {} + self.wait_list = set() + self.redundancy_list = set() + self.last_heartbeats = {} + + +def _remove_participant_epilogue(state: _RendezvousState, settings: RendezvousSettings) -> None: + if state.complete: + # If we do not have any participants left, move to the next round. + if not state.participants: + msg = "No participants left in the rendezvous, marking rendezvous as incomplete" + log.debug(msg) + state.complete = False + + state.round += 1 + else: + if len(state.participants) < settings.min_nodes: + msg = ( + f"Number of participants {len(state.participants)}) less than" + f"min_nodes {settings.min_nodes}, clearning deadline in state" + ) + log.debug(msg) + state.deadline = None + + +class _RendezvousStateHolder(ABC): + """Hold the shared rendezvous state synced with other nodes.""" + + @property + @abstractmethod + def state(self) -> _RendezvousState: + """Get the local state.""" + + @abstractmethod + def sync(self) -> Optional[bool]: + """Read or writes the latest state. + + Returns: + A boolean value indicating whether the local state, in case marked + as dirty, was successfully synced with other nodes. 
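+            In the provided backend-based implementation, ``None`` indicates
+            that nothing was written, for example because the local state was
+            not marked dirty.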
+ """ + + @abstractmethod + def mark_dirty(self) -> None: + """Mark the local state as dirty.""" + + +class _BackendRendezvousStateHolder(_RendezvousStateHolder): + """Hold the rendezvous state synced with other nodes via a backend. + + Args: + backend: + The rendezvous backend to use. + settings: + The rendezvous settings. + cache_duration: + The amount of time, in seconds, to cache the last rendezvous state + before requesting it from the backend again. + """ + + _backend: RendezvousBackend + _state: _RendezvousState + _settings: RendezvousSettings + _cache_duration: int + _token: Token + _dirty: bool + _last_sync_time: float + _dead_nodes: List[_NodeDesc] + + def __init__( + self, + backend: RendezvousBackend, + settings: RendezvousSettings, + cache_duration: int = 1, + ) -> None: + self._backend = backend + self._state = _RendezvousState() + self._settings = settings + self._cache_duration = cache_duration + self._token = None + self._dirty = False + self._last_sync_time = -1 + self._dead_nodes = [] + + def _record(self, message: str, node_state: NodeState = NodeState.RUNNING): + construct_and_record_rdzv_event( + name=f"{self.__class__.__name__}.{get_method_name()}", + run_id=self._settings.run_id, + message=message, + node_state=node_state, + ) + + @property + def state(self) -> _RendezvousState: + """See base class.""" + return self._state + + def sync(self) -> Optional[bool]: + """See base class.""" + state_bits: Optional[bytes] = None + + token = None + + has_set: Optional[bool] + + if self._dirty: + has_set = False + + state_bits = pickle.dumps(self._state) + + set_response = self._backend.set_state(state_bits, self._token) + if set_response is not None: + state_bits, token, has_set = set_response + else: + has_set = None + + if self._cache_duration > 0: + # Avoid overloading the backend if we are asked to retrieve the + # state repeatedly. Try to serve the cached state. + if self._last_sync_time >= max(time.monotonic() - self._cache_duration, 0): + return None + + get_response = self._backend.get_state() + if get_response is not None: + state_bits, token = get_response + + if state_bits is not None: + try: + self._state = pickle.loads(state_bits) + except pickle.PickleError as exc: + raise RendezvousStateError( + "The rendezvous state is corrupt. See inner exception for details." + ) from exc + else: + self._state = _RendezvousState() + + if has_set and self._dead_nodes and log.isEnabledFor(logging.DEBUG): + node_list = ", ".join(f"'{dead_node}'" for dead_node in self._dead_nodes) + + msg = ( + f"As part of the sync operation the node(s) {node_list} have been removed from the " + f"rendezvous '{self._settings.run_id}' since they had no heartbeat." + ) + self._record(message=msg) + log.debug(msg) + + self._token = token + + self._dirty = False + + self._last_sync_time = time.monotonic() + + self._sanitize() + + return has_set + + def _sanitize(self) -> None: + state = self._state + + expire_time = datetime.utcnow() - ( + self._settings.keep_alive_interval * self._settings.keep_alive_max_attempt + ) + + # Filter out the dead nodes. 
+ self._dead_nodes = [ + node + for node, last_heartbeat in state.last_heartbeats.items() + if last_heartbeat < expire_time + ] + + participant_removed = False + + for dead_node in self._dead_nodes: + msg = f"Detected dead node '{dead_node}', removing it from the rendezvous" + log.debug(msg) + del state.last_heartbeats[dead_node] + + try: + del state.participants[dead_node] + + participant_removed = True + except KeyError: + pass + + try: + state.wait_list.remove(dead_node) + except KeyError: + pass + + try: + state.redundancy_list.remove(dead_node) + except KeyError: + pass + + if participant_removed: + # Common epilogue shared with the _remove_from_participants() + # function of _DistributedRendezvousOpExecutor. + _remove_participant_epilogue(state, self._settings) + + def mark_dirty(self) -> None: + """See base class. + + If the local rendezvous state is dirty, the next sync call will try to + write the changes back to the backend. However this attempt might fail + if another node, which had the same state, also made changes and wrote + them before us. + """ + self._dirty = True + + +class _Action(Enum): + """Specifies the possible actions based on the state of the rendezvous.""" + + KEEP_ALIVE = 1 + ADD_TO_PARTICIPANTS = 2 + ADD_TO_WAIT_LIST = 3 + ADD_TO_REDUNDANCY_LIST = 4 + REMOVE_FROM_PARTICIPANTS = 5 + REMOVE_FROM_WAIT_LIST = 6 + REMOVE_FROM_REDUNDANCY_LIST = 7 + MARK_RENDEZVOUS_COMPLETE = 8 + MARK_RENDEZVOUS_CLOSED = 9 + SYNC = 10 + ERROR_CLOSED = 11 + ERROR_TIMEOUT = 12 + FINISH = 13 + + +class _RendezvousContext: + """Holds the context of the rendezvous. + + Attributes: + node: + The node descriptor associated with the current rendezvous handler + instance. + state: + The current state of the rendezvous. + settings: + The rendezvous settings. + """ + + node: _NodeDesc + state: _RendezvousState + settings: RendezvousSettings + + def __init__( + self, node: _NodeDesc, state: _RendezvousState, settings: RendezvousSettings + ) -> None: + self.node = node + self.state = state + self.settings = settings + + +class _RendezvousOpExecutor(ABC): + """Execute rendezvous operations.""" + + @abstractmethod + def run( + self, + state_handler: Callable[[_RendezvousContext, float], _Action], + deadline: float, + update_deadline: Optional[Callable[[timedelta], float]] = None, + ) -> None: + """Execute a rendezvous operation. + + An operation is run inside a state machine and is expected to transition + the rendezvous from one state to another. + + Args: + state_handler: + A callable that is expected to return the next state transition + action based on the current state of the rendezvous. + deadline: + The time, in seconds, at which the operation will be considered + timed-out. + update_deadline: + Function to generate a new operation deadline if the current + node may participate in the next rendezvous. + """ + + +class _DistributedRendezvousOpExecutor(_RendezvousOpExecutor): + """Execute rendezvous operations using a shared state. + + Args: + node: + The node descriptor associated with the current rendezvous handler + instance. + state_holder: + The ``RendezvousStateHolder`` to use to sync the rendezvous state + with other nodes. + settings: + The rendezvous settings. 
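+
+    The executor repeatedly syncs the shared state, asks the supplied
+    operation for the next :py:class:`_Action`, applies state-mutating actions
+    to the local state, and marks the state dirty so the change is written
+    back to the backend on the following sync.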
+ """ + + _node: _NodeDesc + _state: _RendezvousState + _state_holder: _RendezvousStateHolder + _settings: RendezvousSettings + + def __init__( + self, + node: _NodeDesc, + state_holder: _RendezvousStateHolder, + settings: RendezvousSettings, + ) -> None: + self._node = node + self._state_holder = state_holder + self._settings = settings + + def _record(self, message: str, node_state: NodeState = NodeState.RUNNING) -> None: + construct_and_record_rdzv_event( + name=f"{self.__class__.__name__}.{get_method_name()}", + run_id=self._settings.run_id, + message=message, + node_state=node_state, + hostname=self._node.addr, + pid=self._node.pid, + local_id=self._node.local_id, + ) + + def run( + self, + state_handler: Callable[[_RendezvousContext, float], _Action], + deadline: float, + update_deadline: Optional[Callable[[timedelta], float]] = None, + ) -> None: + """See base class.""" + action = None + while action != _Action.FINISH: + # Reads or writes the latest rendezvous state shared by all nodes in + # the rendezvous. Note that our local changes might get overridden + # by another node if that node synced its changes before us. + has_set = self._state_holder.sync() + if has_set is not None: + if has_set: + msg = ( + f"The node '{self._node}' has successfully synced its local changes with " + f"other nodes in the rendezvous '{self._settings.run_id}'." + ) + else: + msg = ( + f"The node '{self._node}' has a stale state and failed to sync its local " + f"changes with other nodes in the rendezvous '{self._settings.run_id}'." + ) + + self._record(message=msg) + log.debug(msg) + + self._state = self._state_holder.state + + ctx = _RendezvousContext(self._node, self._state, self._settings) + + # Determine the next action to take based on the current state of + # the rendezvous. + action = state_handler(ctx, deadline) + + if action == _Action.FINISH: + continue + + if action == _Action.ERROR_CLOSED: + raise RendezvousClosedError() + + if action == _Action.ERROR_TIMEOUT: + raise RendezvousTimeoutError() + + if action == _Action.SYNC: + # Delay the execution by one second to avoid overloading the + # backend if we are asked to poll for state changes. + _delay(seconds=1) + else: + if action == _Action.KEEP_ALIVE: + self._keep_alive() + elif action == _Action.ADD_TO_PARTICIPANTS: + self._add_to_participants() + elif action == _Action.ADD_TO_WAIT_LIST: + self._add_to_wait_list() + elif action == _Action.ADD_TO_REDUNDANCY_LIST: + self._add_to_redundancy_list() + elif action == _Action.REMOVE_FROM_PARTICIPANTS: + self._remove_from_participants() + elif action == _Action.REMOVE_FROM_WAIT_LIST: + self._remove_from_wait_list() + elif action == _Action.REMOVE_FROM_REDUNDANCY_LIST: + self._remove_from_redundancy_list() + # update deadline since the node may participate in rendezvous process + if update_deadline: + deadline = update_deadline(self._settings.timeout.join) + elif action == _Action.MARK_RENDEZVOUS_COMPLETE: + self._mark_rendezvous_complete() + elif action == _Action.MARK_RENDEZVOUS_CLOSED: + self._mark_rendezvous_closed() + + # Attempt to sync our changes back to other nodes. + self._state_holder.mark_dirty() + + def _keep_alive(self) -> None: + msg = ( + f"The node '{self._node}' updated its keep-alive heartbeat time for the rendezvous " + f"'{self._settings.run_id}'. Pending sync." 
+ ) + self._record(message=msg) + log.debug(msg) + + self._state.last_heartbeats[self._node] = datetime.utcnow() + + def _add_to_participants(self) -> None: + msg = ( + f"The node '{self._node}' added itself to the participants of round " + f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + state = self._state + + try: + state.wait_list.remove(self._node) + except KeyError: + pass + + # The ranks of the participants will be set once the rendezvous is + # complete. + state.participants[self._node] = 0 + + self._keep_alive() + + if len(state.participants) == self._settings.min_nodes: + state.deadline = datetime.utcnow() + self._settings.timeout.last_call + + if len(state.participants) == self._settings.max_nodes: + self._mark_rendezvous_complete() + + def _add_to_wait_list(self) -> None: + msg = ( + f"The node '{self._node}' added itself to the wait list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + if self._node in self._state.redundancy_list: + self._state.redundancy_list.remove(self._node) + self._state.wait_list.add(self._node) + + self._keep_alive() + + def _add_to_redundancy_list(self) -> None: + msg = ( + f"The node '{self._node}' added itself to the redundancy list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + self._state.redundancy_list.add(self._node) + + self._keep_alive() + + def _remove_from_participants(self) -> None: + msg = ( + f"The node '{self._node}' removed itself from the participants of round " + f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + state = self._state + + del state.participants[self._node] + + del state.last_heartbeats[self._node] + + # Common epilogue shared with the sanitizer() function of + # _BackendRendezvousStateHolder. + _remove_participant_epilogue(state, self._settings) + + def _remove_from_wait_list(self) -> None: + msg = ( + f"The node '{self._node}' removed itself from the wait list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + self._state.wait_list.remove(self._node) + + del self._state.last_heartbeats[self._node] + + def _remove_from_redundancy_list(self) -> None: + msg = ( + f"The node '{self._node}' removed itself from the redunant list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + self._state.redundancy_list.remove(self._node) + + del self._state.last_heartbeats[self._node] + + def _mark_rendezvous_complete(self) -> None: + msg = ( + f"The node '{self._node}' marked round {self._state.round} of the rendezvous " + f"'{self._settings.run_id}' as complete. Pending sync." + ) + self._record(message=msg, node_state=NodeState.SUCCEEDED) + log.debug(msg) + + state = self._state + + state.complete = True + state.deadline = None + + # Assign the ranks. + for rank, node in enumerate(sorted(state.participants)): + state.participants[node] = rank + + def _mark_rendezvous_closed(self) -> None: + msg = ( + f"The node '{self._node}' marked the rendezvous '{self._settings.run_id}' as closed. " + "Pending sync." 
+ ) + self._record(message=msg, node_state=NodeState.SUCCEEDED) + log.debug(msg) + + self._state.closed = True + + +def _should_keep_alive(ctx: _RendezvousContext) -> bool: + """Determine whether a keep-alive heartbeat should be sent.""" + try: + last_heartbeat = ctx.state.last_heartbeats[ctx.node] + except KeyError: + return False + + return last_heartbeat <= datetime.utcnow() - ctx.settings.keep_alive_interval + + +class _RendezvousExitOp: + """Represent a rendezvous exit operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + if ctx.node in ctx.state.participants: + if time.monotonic() > deadline: + return _Action.ERROR_TIMEOUT + return _Action.REMOVE_FROM_PARTICIPANTS + return _Action.FINISH + + +class _RendezvousJoinOp: + """Represent a rendezvous join operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + state = ctx.state + + # A closed rendezvous means that it no longer accepts new nodes. + if state.closed: + if ctx.node in state.redundancy_list: + msg = f"The rendezvous '{ctx.settings.run_id}' is closed, terminating pending rendezvous." + raise RendezvousGracefulExitError(msg) + return _Action.ERROR_CLOSED + + if ctx.node in state.redundancy_list: + msg = f"The node {ctx.node} is in redunancy list" + log.debug(msg) + # don't apply the timeout logic here, since we want to allow the node to rejoin + if len(state.participants) == ctx.settings.max_nodes: + if _should_keep_alive(ctx): + return _Action.KEEP_ALIVE + else: + return _Action.SYNC + else: + # transition to waiting state that will respect timeouts. + msg = f"The node {ctx.node} is removed from redunancy list" + log.debug(msg) + return _Action.REMOVE_FROM_REDUNDANCY_LIST + + is_participant = ctx.node in state.participants + + # If we are part of the rendezvous and it is already complete there is + # no further action to take. + if state.complete and is_participant: + return _Action.FINISH + + now = time.monotonic() + if now > deadline: + rollback_period = 5 # 5 seconds + + # If we still have time to rollback (a short period on top of the + # operation deadline), try to remove ourself from the rendezvous. + # It is okay if we can't though as our keep-alive will eventually + # expire. + if now <= deadline + rollback_period: + # If we are part of the rendezvous, it means we couldn't find + # enough participants to complete it on time. + if is_participant: + return _Action.REMOVE_FROM_PARTICIPANTS + # If we are in the wait list, it means we couldn't wait till the + # next round of the rendezvous. + if ctx.node in state.wait_list: + return _Action.REMOVE_FROM_WAIT_LIST + return _Action.ERROR_TIMEOUT + + if state.complete: + # If we are here, it means we are not part of the rendezvous. In + # case the rendezvous has capacity for additional participants add + # ourself to the wait list for the next round. + if len(state.participants) < ctx.settings.max_nodes: + if ctx.node not in state.wait_list: + return _Action.ADD_TO_WAIT_LIST + elif len(state.participants) >= ctx.settings.max_nodes: + if ctx.node not in state.redundancy_list and ctx.node not in state.wait_list: + return _Action.ADD_TO_REDUNDANCY_LIST + elif is_participant: + # If the rendezvous has enough number of participants including us, + # check whether we have passed the rendezvous deadline. If yes, + # complete it. 
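+            # ``state.deadline`` was set to ``now + last_call`` by
+            # ``_add_to_participants()`` when the participant count first
+            # reached ``min_nodes``, so passing it here means the last-call
+            # grace period has elapsed.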
+ if len(state.participants) >= ctx.settings.min_nodes and \ + len(state.participants) <= ctx.settings.max_nodes: + if cast(datetime, state.deadline) < datetime.utcnow(): + msg = ( + f"The node '{ctx.node}' marking the rendezvous complete, " + f"quorum established within deadline" + ) + log.debug(msg) + return _Action.MARK_RENDEZVOUS_COMPLETE + else: + msg = f"The node '{ctx.node}' can't complete rendezvous: deadline reached" + log.debug(msg) + else: + msg = f"The node '{ctx.node}' can't complete rendezvous: not enough participants" + log.debug(msg) + else: + # The rendezvous is not complete yet and we are not part of it. Try + # to join. + return _Action.ADD_TO_PARTICIPANTS + + if _should_keep_alive(ctx): + return _Action.KEEP_ALIVE + + # At this point either the rendezvous is not complete, but we are part + # of it, which means we have to wait for other participants to join; or + # the rendezvous is complete, but we are not part of it, which means we + # have to wait for the next round. + return _Action.SYNC + + +class _RendezvousCloseOp: + """Represent a rendezvous close operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + if ctx.state.closed: + return _Action.FINISH + if time.monotonic() > deadline: + return _Action.ERROR_TIMEOUT + return _Action.MARK_RENDEZVOUS_CLOSED + + +class _RendezvousKeepAliveOp: + """Represent a rendezvous keep-alive update operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + if _should_keep_alive(ctx): + if time.monotonic() > deadline: + return _Action.ERROR_TIMEOUT + return _Action.KEEP_ALIVE + return _Action.FINISH + + +class DynamicRendezvousHandler(RendezvousHandler): + """Represent a handler that sets up a rendezvous among a set of nodes.""" + + # Static + _node_desc_generator = _NodeDescGenerator() + + _this_node: _NodeDesc + _settings: RendezvousSettings + _backend_name: str + _store: Store + _state_holder: _RendezvousStateHolder + _op_executor: _RendezvousOpExecutor + _heartbeat_lock: threading.Lock + _keep_alive_timer: Optional[_PeriodicTimer] + + @classmethod + def from_backend( + cls, + run_id: str, + store: Store, + backend: RendezvousBackend, + min_nodes: int, + max_nodes: int, + local_addr: Optional[str] = None, + timeout: Optional[RendezvousTimeout] = None, + ): + """Create a new :py:class:`DynamicRendezvousHandler`. + + Args: + run_id: + The run id of the rendezvous. + store: + The C10d store to return as part of the rendezvous. + backend: + The backend to use to hold the rendezvous state. + min_nodes: + The minimum number of nodes to admit to the rendezvous. + max_nodes: + The maximum number of nodes to admit to the rendezvous. + local_addr: + The local node address. + timeout: + The timeout configuration of the rendezvous. + """ + # We associate each handler instance with a unique node descriptor. 
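+        # Illustrative sketch of the typical construction path; ``backend`` and
+        # ``store`` are placeholders, e.g. the pair returned by the C10d
+        # ``create_backend`` factory:
+        #
+        #   handler = DynamicRendezvousHandler.from_backend(
+        #       run_id="job-42",
+        #       store=store,
+        #       backend=backend,
+        #       min_nodes=1,
+        #       max_nodes=4,
+        #   )
+        #   store, rank, world_size = handler.next_rendezvous()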
+ node = cls._node_desc_generator.generate(local_addr) + + settings = RendezvousSettings( + run_id, + min_nodes, + max_nodes, + timeout or RendezvousTimeout(), + keep_alive_interval=timedelta(seconds=5), + keep_alive_max_attempt=3, + ) + + state_holder = _BackendRendezvousStateHolder(backend, settings) + + return cls(node, settings, backend.name, store, state_holder) + + def __init__( + self, + node: _NodeDesc, + settings: RendezvousSettings, + backend_name: str, + store: Store, + state_holder: _RendezvousStateHolder, + ) -> None: + if not settings.run_id: + raise ValueError("The run id must be a non-empty string.") + + if settings.min_nodes < 1: + raise ValueError( + f"The minimum number of nodes ({settings.min_nodes}) must be greater than zero." + ) + + if settings.max_nodes < settings.min_nodes: + raise ValueError( + f"The maximum number of nodes ({settings.max_nodes}) must be greater than or equal " + f"to the minimum number of nodes ({settings.min_nodes})." + ) + + self._this_node = node + + self._settings = settings + + self._backend_name = backend_name + + self._store = store + + self._state_holder = state_holder + + self._op_executor = _DistributedRendezvousOpExecutor( + self._this_node, self._state_holder, self._settings + ) + + self._heartbeat_lock = threading.Lock() + + self._keep_alive_timer = None + + def _record( + self, + message: str, + node_state: NodeState = NodeState.RUNNING, + rank: Optional[int] = None, + ) -> None: + construct_and_record_rdzv_event( + name=f"{self.__class__.__name__}.{get_method_name()}", + run_id=self._settings.run_id, + message=message, + node_state=node_state, + hostname=self._this_node.addr, + pid=self._this_node.pid, + local_id=self._this_node.local_id, + rank=rank, + ) + + @property + def settings(self) -> RendezvousSettings: + """Get the settings of the rendezvous.""" + return self._settings + + def get_backend(self) -> str: + """See base class.""" + return self._backend_name + + def next_rendezvous(self) -> Tuple[Store, int, int]: + """See base class.""" + msg = ( + f"The node '{self._this_node}' attempts to join the next round of the rendezvous " + f"'{self._settings.run_id}'." + ) + self._record(message=msg) + log.info(msg) + + try: + self._stop_heartbeats() + + # Delay the execution for a small random amount of time if this is our + # first run. This will slightly skew the rendezvous attempts across the + # nodes and reduce the load on the backend. + if self._state_holder.state.round == 0: + _delay(seconds=(0, 0.3)) + + exit_op = _RendezvousExitOp() + join_op = _RendezvousJoinOp() + + deadline = self._get_deadline(self._settings.timeout.join) + self._op_executor.run(exit_op, deadline) + self._op_executor.run( + join_op, + deadline, + self._get_deadline) + + self._start_heartbeats() + + rank, world_size = self._get_world() + store = self._get_store() + + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + msg = ( + f"The node '{self._this_node}' has joined round {self._state_holder.state.round} of " + f"the rendezvous '{self._settings.run_id}' as rank {rank} in a world of size " + f"{world_size}." 
+ ) + self._record(message=msg, rank=rank) + log.info(msg) + + return store, rank, world_size + + def is_closed(self) -> bool: + """See base class.""" + try: + with self._heartbeat_lock: + self._state_holder.sync() + + return self._state_holder.state.closed + + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def set_closed(self) -> None: + """See base class.""" + try: + with self._heartbeat_lock: + self._close() + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def num_nodes_waiting(self) -> int: + """See base class.""" + try: + with self._heartbeat_lock: + self._state_holder.sync() + + return len(self._state_holder.state.wait_list) + + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def get_run_id(self) -> str: + """See base class.""" + return self._settings.run_id + + def shutdown(self) -> bool: + """See base class.""" + self._stop_heartbeats() + + try: + self._close() + + return True + except RendezvousError as ex: + msg = ( + f"The node '{self._this_node}' has failed to shutdown the rendezvous " + f"'{self._settings.run_id}' due to an error of type {type(ex).__name__}." + ) + self._record(message=msg, node_state=NodeState.FAILED) + log.warning(msg) + + return False + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def _close(self) -> None: + op = _RendezvousCloseOp() + + deadline = self._get_deadline(self._settings.timeout.close) + + self._op_executor.run(op, deadline) + + msg = f"The node '{self._this_node}' has closed the rendezvous '{self._settings.run_id}'." + self._record(message=msg, node_state=NodeState.SUCCEEDED) + log.info(msg) + + @staticmethod + def _keep_alive_weak(weak_self) -> None: + self = weak_self() + if self is not None: + self._keep_alive() + + def _keep_alive(self) -> None: + self._heartbeat_lock.acquire() + + op = _RendezvousKeepAliveOp() + + deadline = self._get_deadline(self._settings.timeout.heartbeat) + + try: + self._op_executor.run(op, deadline) + + msg = ( + f"The node '{self._this_node}' has sent a keep-alive heartbeat to the rendezvous " + f"'{self._settings.run_id}'." + ) + self._record(message=msg) + log.debug(msg) + except RendezvousError as ex: + msg = ( + f"The node '{self._this_node}' has failed to send a keep-alive heartbeat to the " + f"rendezvous '{self._settings.run_id}' due to an error of type {type(ex).__name__}." 
+ ) + self._record(message=msg, node_state=NodeState.FAILED) + log.warning(msg) + finally: + self._heartbeat_lock.release() + + def _start_heartbeats(self) -> None: + self._keep_alive_timer = _PeriodicTimer( + self._settings.keep_alive_interval, self._keep_alive_weak, weakref.ref(self) + ) + + self._keep_alive_timer.set_name(f"RendezvousKeepAliveTimer_{self._this_node.local_id}") + + self._keep_alive_timer.start() + + def _stop_heartbeats(self) -> None: + if self._keep_alive_timer is None: + return + + self._keep_alive_timer.cancel() + + def _get_world(self) -> Tuple[int, int]: + state = self._state_holder.state + + return state.participants[self._this_node], len(state.participants) + + def _get_store(self) -> Store: + key_prefix = f"torch.rendezvous.{self._settings.run_id}.{self._state_holder.state.round}" + + return PrefixStore(key_prefix, self._store) + + def _get_deadline(self, timeout: timedelta) -> float: + return time.monotonic() + timeout.total_seconds() + + +def _get_timeout(params: RendezvousParameters, key: str) -> Optional[timedelta]: + timeout = params.get_as_int(key + "_timeout") + if timeout is None: + return None + return timedelta(seconds=timeout) + + +def create_handler( + store: Store, backend: RendezvousBackend, params: RendezvousParameters +) -> DynamicRendezvousHandler: + """Create a new :py:class:`DynamicRendezvousHandler` from the specified parameters. + + Args: + store: + The C10d store to return as part of the rendezvous. + backend: + The backend to use to hold the rendezvous state. + + +-------------------+------------------------------------------------------+ + | Parameter | Description | + +===================+======================================================+ + | join_timeout | The total time, in seconds, within which the | + | | rendezvous is expected to complete. Defaults to 600 | + | | seconds. | + +-------------------+------------------------------------------------------+ + | last_call_timeout | An additional wait amount, in seconds, before | + | | completing the rendezvous once the minimum number of | + | | nodes has been reached. Defaults to 30 seconds. | + +-------------------+------------------------------------------------------+ + | close_timeout | The time, in seconds, within which the rendezvous is | + | | expected to close after a call to | + | | :py:meth:`RendezvousHandler.set_closed` or | + | | :py:meth:`RendezvousHandler.shutdown`. Defaults to | + | | 30 seconds. | + +-------------------+------------------------------------------------------+ + """ + try: + timeout = RendezvousTimeout( + _get_timeout(params, "join"), + _get_timeout(params, "last_call"), + _get_timeout(params, "close"), + ) + + return DynamicRendezvousHandler.from_backend( + params.run_id, + store, + backend, + params.min_nodes, + params.max_nodes, + params.local_addr, + timeout, + ) + except Exception as e: + construct_and_record_rdzv_event( + message=f"{type(e).__name__}: {str(e)}", + run_id=params.run_id, + node_state=NodeState.FAILED, + ) + raise diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous.py new file mode 100644 index 0000000000000000000000000000000000000000..dab767925037bea5b94e8292fbbd6cb791951011 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous.py @@ -0,0 +1,1045 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. 
+# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import json +import logging +import sys +import threading +import time +from typing import Optional + +import etcd # type: ignore[import] +from torch.distributed.elastic.rendezvous import ( + RendezvousClosedError, + RendezvousError, + RendezvousHandler, + RendezvousParameters, + RendezvousTimeoutError, +) + +from .utils import parse_rendezvous_endpoint +from .etcd_store import EtcdStore, cas_delay + + +_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s") +_log_handler = logging.StreamHandler(sys.stderr) +_log_handler.setFormatter(_log_fmt) + +log = logging.getLogger(__name__) +log.propagate = False +log.setLevel(logging.INFO) +log.addHandler(_log_handler) + + +# Retryable failure exception means the we were too late to make +# a desired state transition (e.g. because of a race condition), +# and should now restart from the beginning. +# A small delay is recommended to avoid spamming Etcd. +class EtcdRendezvousRetryableFailure(Exception): + pass + + +# Similar to retryable failure, but the new state we observed suggests we +# can re-try immediately, i.e. without a need for "safety delay". +class EtcdRendezvousRetryImmediately(Exception): + pass + + +# Default timeout for the rendezvous. +_DEFAULT_TIMEOUT: int = 600 # 10 minutes + +# Additional waiting time after reaching the minimum number of nodes +# in case the rendezvous is elastic (min != max). +_DEFAULT_LAST_CALL_TIMEOUT: int = 30 # 30 seconds + +# Various constants used internally in EtcdRendezvous +CONST_ETCD_SETUP_TTL = 5 +CONST_ETCD_FROZEN_TTL = 10 +CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10 + +# Ephemeral node TTL for worker's keep-alive key: +CONST_WORKER_KEEPALIVE_TTL = 10 + +# TTL for the ephemeral run_id-specific directory. All rendezvous state data +# for a specific run_id (job instance) is contained within directory. +# Its only role is to clean-up rendezvous data from old runs (for the case when +# etcd server is persistent), and has no affect on correctness, but should be +# larger than any timeouts that a worker process is expected to survive: +CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours + + +class EtcdRendezvousHandler(RendezvousHandler): + """ + Implements a + :py:class:`torch.distributed.elastic.rendezvous.RendezvousHandler` interface + backed by + :py:class:`torch.distributed.elastic.rendezvous.etcd_rendezvous.EtcdRendezvous`. + ``EtcdRendezvousHandler`` uses a URL to configure the type of rendezvous to + use and to pass implementation specific configurations to the rendezvous + module. The basic etcd rendezvous configuration URL looks like the following + :: + + etcd://:/?min_workers=&max_workers= # noqa: W605 + + -- example -- + + etcd://localhost:2379/1234?min_workers=1&max_workers=3 + + The URL above is interpreted as follows: + + 1. Use the rendezvous handler that is registered with the ``etcd`` + scheme + 2. The ``etcd`` endpoint to use is ``localhost:2379`` + 3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to + share a common etcd server for multiple jobs so long as the + ``job_ids`` are guaranteed to be unique). Note that the job id can be + any string (e.g. does not need to be a number) as long as it is + unique. + 4. 
``min_workers=1`` and ``max_workers=3`` specifies a range for + membership size - Torch Distributed Elastic starts running the job as + long as the cluster size is greater than or equal to ``min_workers`` + and admits up to ``max_workers`` into the cluster. + + Below are a full list of the parameters that can be passed to etcd + rendezvous: + + +--------------------------------------------+--------------------------+ + | Parameter | Description | + +============================================+==========================+ + | min_workers | minimum number of | + | | workers for the | + | | rendezvous to be valid | + +--------------------------------------------+--------------------------+ + | max_workers | maximum number of | + | | workers to admit | + +--------------------------------------------+--------------------------+ + | timeout | total timeout within | + | | which next_rendezvous is | + | | expected to succeed | + | | (default 600s) | + +--------------------------------------------+--------------------------+ + | last_call_timeout | additional wait amount | + | | (“last call”) after min | + | | number of workers has | + | | been reached (defaults | + | | to 30s) | + +--------------------------------------------+--------------------------+ + | etcd_prefix | path prefix (from etcd | + | | root), inside which all | + | | etcd nodes will be | + | | created (defaults to | + | | ``/torchelastic/p2p``) | + +--------------------------------------------+--------------------------+ + """ + + def __init__(self, rdzv_impl): + self._rdzv_impl = rdzv_impl + + def __del__(self): + # TODO: look into using weakref here instead. + del self._rdzv_impl + + def get_backend(self) -> str: + return "etcd" + + def next_rendezvous(self): + rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier() + + log.info("Creating EtcdStore as the c10d::Store implementation") + store = self._rdzv_impl.setup_kv_store(rdzv_version) + + return store, rank, world_size + + def is_closed(self): + try: + _, state = self._rdzv_impl.get_rdzv_state() + return state["status"] == "closed" + except etcd.EtcdKeyNotFound: + # No rendezvous state, so it cannot be closed. + return False + + def set_closed(self): + self._rdzv_impl.set_closed() + + def num_nodes_waiting(self): + try: + _, state = self._rdzv_impl.get_rdzv_state() + if state["status"] == "final": + return state["num_workers_waiting"] + except etcd.EtcdKeyNotFound: + pass + return 0 + + def get_run_id(self) -> str: + return self._rdzv_impl._run_id + + def shutdown(self) -> bool: + try: + self.set_closed() + return True + except BaseException as e: + log.warning("Shutdown failed. Error occurred: %s", str(e)) + return False + + +# TODO: we should probably handle a few additional errors, +# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are +# only relevant for multi-node Etcd ensemble. A simple retry would work, +# but is verbose to add everywhere. Consider wrapping the client calls +# into auto-retry for these errors? 
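+#
+# Illustrative construction sketch (normally performed by the ``etcd`` handler
+# factory; the ``etcd.Client`` arguments are assumptions for the example):
+#
+#   client = etcd.Client(host="localhost", port=2379)
+#   rdzv = EtcdRendezvous(
+#       client=client,
+#       prefix="/torchelastic/p2p",
+#       run_id="1234",
+#       num_min_workers=1,
+#       num_max_workers=3,
+#       timeout=_DEFAULT_TIMEOUT,
+#       last_call_timeout=_DEFAULT_LAST_CALL_TIMEOUT,
+#   )
+#   handler = EtcdRendezvousHandler(rdzv)
+#   store, rank, world_size = handler.next_rendezvous()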
+# +class EtcdRendezvous: + """A rendezvous implementation that uses `etcd `__ as the backend store.""" + + def __init__( + self, + client, + prefix, + run_id, + num_min_workers, + num_max_workers, + timeout, + last_call_timeout, + ): + self.client = client + log.info("Etcd machines: %s", self.client.machines) + + self._prefix = prefix + self._run_id = run_id + self._num_min_workers = num_min_workers + self._num_max_workers = num_max_workers + self._timeout = timeout + self._last_call_timeout = last_call_timeout + + # For cleaning up TTL refresher threads (for ephemeral keys) + self._lease_run_id_stop = None + self._lease_this_rank_stop = None + + if not self._prefix.endswith("/"): + self._prefix += "/" + + # Setup a permanent prefix dir, if didn't exist + if self._prefix != "/": + self.create_path_if_not_exists(self._prefix) + + # Lease a "sub-root" node specific to this job instance (run_id) + self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL) + self._lease_run_id_stop = self.setup_lease_renewal( + self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL + ) + + # Subdir for all rendezvous work + self.create_path_if_not_exists(self.get_path("/rdzv")) + + # Create a rendezvous version counter, if doesn't exist + try: + self.client.write( + key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False + ) + except etcd.EtcdAlreadyExist: + pass + + def __del__(self): + # TODO: look into using weakref here instead. + if self._lease_run_id_stop is not None: + self._lease_run_id_stop.set() + + if self._lease_this_rank_stop is not None: + self._lease_this_rank_stop.set() + + def rendezvous_barrier(self): + """ + Main entry point for next rendezvous. + + This method is blocking until rendezvous succeeds or a timeout occurs. + + Returns: + ``(rdzv_version, rank, world_size)`` + + Raises: + RendezvousTimeoutError - timeout waiting for rendezvous + RendezvousClosedError - rendezvous is or was closed while waiting + RendezvousError - other persistent errors that + render the rendezvous non-retryable + """ + self._rendezvous_deadline = time.time() + self._timeout + while True: + if time.time() > self._rendezvous_deadline: + raise RendezvousTimeoutError() + + log.info("Attempting to join next rendezvous") + try: + # Dis-own our lease in the previous rendezvous, if exists + if self._lease_this_rank_stop is not None: + self._lease_this_rank_stop.set() + + return self.init_phase() + + except EtcdRendezvousRetryImmediately: + # The type of failure suggests we can retry without delay + pass + + except EtcdRendezvousRetryableFailure: + # In case of retryable failure, wait a small delay + # to avoid spamming etcd + time.sleep(1) + + except RendezvousTimeoutError: + log.info("Rendezvous timeout occurred in EtcdRendezvousHandler") + raise + + except RendezvousClosedError: + log.info( + "Rendezvous for run_id=%s was observed to be closed", self._run_id + ) + raise + + except RendezvousError: + raise + + except Exception as e: + # In case of a general exception, wait a small delay + # to avoid spamming etcd + # FIXME: there are a few things that fall under this like + # etcd.EtcdKeyNotFound, etc, which could be handled more explicitly. + log.info("Rendezvous attempt failed, will retry. Reason: %s", e) + time.sleep(1) + + def init_phase(self): + """ + Initially, the rendezvous state is expected to be one of: + + 1. empty (non-existent) - in this case we try to create a new one. + 2. joinable - we try to join it. + 3. 
final - we announce ourselves as waiting, and go into monitoring mode + + Any other state is considered transitional, and will be retried after + a short delay. + + Returns: + ``(rdzv_version, rank, world_size)`` + + Raises: + RendezvousClosedError - current rendezvous was/is closed + EtcdRendezvousRetryableFailure - observed some intermediate + state, which is best handled by retrying later + """ + try: + active_version = self.try_create_rendezvous() + state = json.loads(active_version.value) + log.info("New rendezvous state created: %s", state) + except etcd.EtcdAlreadyExist: + active_version, state = self.get_rdzv_state() + # Note: it is possible for above query to fail (etcd.EtcdKeyNotFound), + # but this is ok for us - just means we'll restart from beginning. + log.info("Observed existing rendezvous state: %s", state) + + if state["status"] == "closed": + raise RendezvousClosedError() + + if state["status"] == "joinable": + return self.join_phase(state["version"]) + + if state["status"] == "final": + self.handle_existing_rendezvous(state["version"]) + raise EtcdRendezvousRetryImmediately() + + self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1) + raise EtcdRendezvousRetryableFailure() + + def join_phase(self, expected_version): + """ + We observed a rendezvous state in 'joinable' state, and attempt to join this + particular version, and then wait for all other peers to join. + """ + # Failure to join will propagate an exception, causing a re-entry. + active_version, this_rank = self.join_rendezvous(expected_version) + state = json.loads(active_version.value) + log.info( + "Joined rendezvous version %s as rank %s. Full state: %s", + state["version"], this_rank, state + ) + + # If this worker was first to reach num_min_workers requirement, + # and rendezvous is still joinable (therefore it is elastic), + # then this worker will be responsible for waiting out the "last call" + # timeout and closing (i.e. transitioning to 'frozen') the rendezvous + # afterwards. + # As a safety against a potential failure of this worker (during the + # last call timeout), the rendezvous state is made ephemeral + # when min_num_workers is reached. + + if this_rank == self._num_min_workers - 1 and state["status"] == "joinable": + log.info("Rank %s is responsible for join last call.", this_rank) + last_call_deadline = time.time() + self._last_call_timeout + self.handle_join_last_call(expected_version, last_call_deadline) + log.info("Rank %s finished join last call.", this_rank) + + # Wait for rendezvous state to be frozen, which means a fixed set of peers + log.info("Waiting for remaining peers.") + active_version = self.wait_for_peers(expected_version) + state = json.loads(active_version.value) + + assert ( + state["version"] == expected_version + ), "Logic error: failed to observe version mismatch" + + return self.confirm_phase(expected_version, this_rank) + + def confirm_phase(self, expected_version, this_rank): + """ + Once the rendezvous state transitions from 'joinable' to 'frozen', + we have every participant confirm their membership and setup per-member + keep-alive TTL keys, and then wait for all other participants to confirm, + which would then successfully conclude this rendezvous. + """ + log.info("All peers arrived. 
Confirming membership.") + self.confirm_membership(expected_version, this_rank) + + log.info("Waiting for confirmations from all peers.") + active_version = self.wait_for_final(expected_version) + state = json.loads(active_version.value) + + log.info( + "Rendezvous version %s is complete. Final state: %s", + state["version"], state + ) + + # Rendezvous version number; our rank in it; world size + return state["version"], this_rank, len(state["participants"]) + + def handle_existing_rendezvous(self, expected_version): + """ + Handle the case when there's an existing (state 'final) rendezvous already + in place, and we have to announce ourselves waiting, and wait until + the next rendezvous opportunity. + """ + # If state is 'final' -> increment num_workers_waiting + # Then, observe state changes: + # 1. if it's no longer final -> bail out and re-try + # 2. if keep alives are missing, destroy it and bail out. + active_state = self.announce_self_waiting(expected_version) + log.info( + "Added self to waiting list. Rendezvous full state: %s", + active_state.value + ) + + self.wait_for_rendezvous_to_free(expected_version) + log.info("Previously existing rendezvous state changed. Will re-try joining.") + + def try_create_rendezvous(self): + """ + Create new rendezvous state or raise an exception that indicates an unexpected state (e.g. already exists). + + Raises: + RendezvousError - on unexpected state + """ + # Initially active_version is ephemeral - this is to handle the + # possibility that might fail to complete the setup transaction, + # i.e. the transition "setup" -> "joinable". + active_version = self.client.write( + key=self.get_path("/rdzv/active_version"), + value=json.dumps({"status": "setup"}), + prevExist=False, + ttl=CONST_ETCD_SETUP_TTL, + ) + + try: + version_counter = self.client.get(self.get_path("/rdzv/version_counter")) + version_counter.value = str(int(version_counter.value) + 1) + self.client.update(version_counter) + except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed) as e: + raise RendezvousError( + "Unexpected state of EtcdRendezvousHandler, worker needs to die." + ) from e + + # Any failure below results in declaring a retryable rendezvous failure. + # The ephemeral /rdzv/active_version will expire and someone can then + # re-try the setup process. + + # Create directory node for participant data + self.client.write( + key=self.get_path(f"/rdzv/v_{version_counter.value}"), + value=None, + dir=True, + prevExist=False, + ) + + # Publish rendezvous version and signal it is ready-to-be-joined. + # If rendezvous was set closed just before this, a retry will happen, + # where the closed condition will be handled. + return self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps( + { + "status": "joinable", + "version": version_counter.value, + "participants": [], + } + ), + prev_value=active_version.value, + ) + + def join_rendezvous(self, expected_version): + """Helper method for the join phase.""" + # Use compare-and-swap to add self to rendezvous state: + while True: + cas_delay() + active_version, state = self.get_rdzv_state() + + if state["status"] != "joinable": + raise EtcdRendezvousRetryableFailure( + "Rendezvous state became non-joinable before we could join. " + "Must join next one." + ) + + if state["version"] != expected_version: + raise EtcdRendezvousRetryImmediately( + "Rendezvous version changed. Must try join the new one." 
+ ) + + assert ( + len(state["participants"]) < self._num_max_workers + ), "Logic error: joinable rendezvous should always have space left" + + this_rank = len(state["participants"]) + state["participants"].append(this_rank) + + # When reaching min workers, or changing state to frozen, we'll set + # the active_version node to be ephemeral. + set_ttl: Optional[int] = None + if len(state["participants"]) == self._num_max_workers: + state["status"] = "frozen" + state["keep_alives"] = [] + set_ttl = CONST_ETCD_FROZEN_TTL + elif len(state["participants"]) >= self._num_min_workers: + set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL + + try: + # Compare-and-swap. + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ttl=set_ttl, + ) + # We succeeded joining. + return active_version, this_rank + + except etcd.EtcdCompareFailed: + log.info("Join rendezvous CAS unsuccessful, retrying") + + def wait_for_peers(self, expected_version): + """Helper method for the join phase.""" + active_version, state = self.get_rdzv_state() + while True: + if state["status"] == "frozen" and state["version"] == expected_version: + # Success, all peers arrived. + return active_version + + elif state["status"] == "joinable" and state["version"] == expected_version: + # Continue waiting for any interesting events. + active_version, state = self.try_wait_for_state_change( + etcd_index=active_version.etcd_index + 1 + ) + + else: + # No valid transition possible at this point + raise EtcdRendezvousRetryableFailure( + "Rendezvous state transition no longer possible. Must re-enter." + ) + + def confirm_membership(self, expected_version, this_rank): + """Helper method for the confirm phase.""" + # Compare-and-swap loop + while True: + cas_delay() + active_version, state = self.get_rdzv_state() + + if state["status"] != "frozen": + raise EtcdRendezvousRetryImmediately( + "Rendezvous no longer frozen, before we confirmed. " + "Must join next one" + ) + if state["version"] != expected_version: + raise EtcdRendezvousRetryImmediately( + "Rendezvous version changed. Must try join the new one." + ) + + this_lease_key = self.get_path( + f"/rdzv/v_{expected_version}/rank_{this_rank}" + ) + self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL) + + state["keep_alives"].append(this_lease_key) + if len(state["keep_alives"]) == len(state["participants"]): + # Everyone confirmed (this rank is last to do so) + state["status"] = "final" + state["num_workers_waiting"] = 0 + finalize = True + else: + finalize = False + + try: + # Compare-and-swap. If new state is still frozen, keep it ephemeral. + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ttl=None if finalize else CONST_ETCD_FROZEN_TTL, + ) + + self._lease_this_rank_stop = self.setup_lease_renewal( + this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL + ) + return active_version + + except etcd.EtcdCompareFailed: + log.info("Confirm membership CAS unsuccessful, retrying") + + def wait_for_final(self, expected_version): + """Helper method for the confirm phase.""" + active_version, state = self.get_rdzv_state() + while True: + if state["status"] == "final" and state["version"] == expected_version: + # Success. This rendezvous is final, and we accept it. 
+ return active_version + + elif state["status"] == "frozen" and state["version"] == expected_version: + # Continue waiting for any interesting events. + active_version, state = self.try_wait_for_state_change( + etcd_index=active_version.etcd_index + 1 + ) + + else: + # No valid transition possible at this point + raise EtcdRendezvousRetryableFailure( + "Rendezvous state transition no longer possible. Must re-enter." + ) + + def announce_self_waiting(self, expected_version): + """ + Announce this worker is waiting (via num_workers_waiting counter) to join next + rendezvous, but only if state and version match. + """ + while True: + cas_delay() + active_version, state = self.get_rdzv_state() + + if state["status"] != "final" or state["version"] != expected_version: + raise EtcdRendezvousRetryImmediately() + + # Increment counter to signal an additional waiting worker. + state["num_workers_waiting"] += 1 + + try: + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ) + return active_version + + except etcd.EtcdCompareFailed: + log.info("Announce self as waiting CAS unsuccessful, retrying") + + def wait_for_rendezvous_to_free(self, expected_version): + """ + When there's an existing valid rendezvous in state 'final', we have to wait until the next opportunity to join. + + Such opportunity may come from: + + 1. rendezvous state changed by someone else, in which case we unblock and retry. + 2. rendezvous becomes invalid because at least one member failed to renew their + leased keep_alive node. We detect this, and destroy the rendezvous. + """ + active_version, state = self.get_rdzv_state() + while True: + if state["status"] != "final" or state["version"] != expected_version: + return + + # Check if current rendezvous state is valid, in the sense that all + # its members are alive (renewing their lease). + # If not, try destroy this rendezvous, so a new one can be created. + alive_members = self.client.get( + self.get_path(f"/rdzv/v_{expected_version}") + ) + keep_alive_keys = [ch.key for ch in alive_members.children] + + for key in state["keep_alives"]: + if key not in keep_alive_keys: + # This participant didn't renew their lease. We'll declare this + # rendezvous version as dead (but only if it hadn't changed) + log.info("Keep-alive key %s is not renewed.", key) + log.info( + "Rendezvous version %s is incomplete. ", + expected_version + ) + log.info("Attempting to destroy it.") + + # Compare-and-delete operation. Throws if compare failed, + # which means rendezvous was already destroyed/re-created/closed, + # and we can try to re-enter the barrier. + self.client.delete( + key=self.get_path("/rdzv/active_version"), + prevValue=active_version.value, + ) + + log.info( + "Destroyed rendezvous version %s successfully.", + expected_version + ) + + # We can return (and retry) immediately + return + + # Existing rendezvous seems valid, no reason to destroy it. + # We just have to wait until something changes and re-check. 
+ try: + overall_timeout = ( + max(self._rendezvous_deadline - time.time(), 0.0) + 1.0 + ) + self.client.watch( + key=self.get_path("/rdzv"), + index=active_version.etcd_index + 1, + recursive=True, + timeout=overall_timeout, + ) + except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut): + pass + + if time.time() > self._rendezvous_deadline: + raise RendezvousTimeoutError() + active_version, state = self.get_rdzv_state() + + def handle_join_last_call(self, expected_version, deadline): + """ + After we reach min number of workers, one particular worker takes on the + responsibility of waiting an additional timeout before closing the join window. + If the worker responsible for this fails, the rendezvous will be destroyed due + to expiring TTL, and the other participants will re-rendezvous. + + Here we expect to see state + Exit gracefully if either: + + 1. state becomes + 2. timeout happens (reaching deadline), in which case + we try the transition to + + Exit with exception otherwise. + """ + active_version, state = self.get_rdzv_state() + while True: + if state["status"] == "frozen" and state["version"] == expected_version: + # Worker set became frozen before last-call timeout. This is possible + # when num_max_workers is reached before the timeout. + return + + if state["status"] != "joinable" or state["version"] != expected_version: + raise EtcdRendezvousRetryableFailure( + "Rendezvous state transition no longer possible. Must re-enter." + ) + + # If timeout occurred, attempt a state transition (joinable -> frozen) + if time.time() >= deadline: + state["status"] = "frozen" + state["keep_alives"] = [] + try: + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ttl=CONST_ETCD_FROZEN_TTL, + ) + # We successfully made this rendezvous frozen. + return + except etcd.EtcdCompareFailed: + log.info("Join last-call transition CAS unsuccessful. Will retry") + cas_delay() + active_version, state = self.get_rdzv_state() + continue + + # Timeout did not occur, so we must refresh TTL, and wait for + # further changes. Note: we only want TTL to be refreshed if + # state is still joinable, hence we use CAS for that here, + # even though we don't change any of the data. + try: + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=active_version.value, + prev_value=active_version.value, + ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL, + ) + + # Minimize "oversleeping": + timeout = min( + CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2, + deadline - time.time() + 1.0, # Oversleeping by 1s is ok. + ) + active_version, state = self.try_wait_for_state_change( + etcd_index=active_version.etcd_index + 1, timeout=timeout + ) + except etcd.EtcdCompareFailed: + log.info("Join last-call TTL refresh CAS unsuccessful, will retry") + cas_delay() + active_version, state = self.get_rdzv_state() + + def set_closed(self): + """ + Mark rendezvous 'closed' for current run_id, which is used to signal other + participants to not attempt to perform (re-)rendezvous. This is useful + when one of the workers decides the job is complete. + """ + while True: + active_version, state = self.get_rdzv_state() + + if state["status"] == "closed": + # Already closed by someone else. 
+ return + + state["status"] = "closed" + try: + self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ) + return + + except etcd.EtcdCompareFailed: + log.info("Set closed CAS unsuccessful, retrying") + cas_delay() + + def get_rdzv_state(self): + active_version = self.client.get(key=self.get_path("/rdzv/active_version")) + return active_version, json.loads(active_version.value) + + def try_wait_for_state_change(self, etcd_index, timeout=None): + # Don't sleep past the overall deadline (at least more than by 1s) + overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0 + timeout = overall_timeout if timeout is None else min(timeout, overall_timeout) + + try: + self.client.watch( + self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout + ) + except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut): + pass + + if time.time() > self._rendezvous_deadline: + raise RendezvousTimeoutError() + + # Unfortunately, we have to do another fetch in order to get last etcd_index. + return self.get_rdzv_state() + + def get_path(self, path): + if not path.startswith("/"): + path = "/" + path + + return f"{self._prefix}run_{self._run_id}{path}" + + def create_path_if_not_exists(self, full_path, ttl=None): + try: + self.client.write( + key=full_path, value=None, dir=True, prevExist=False, ttl=ttl + ) + except etcd.EtcdAlreadyExist: + pass + + def setup_lease_renewal(self, full_path, ttl): + # NOTE: For ephemeral key TTL renewal (~lease) to work correctly, + # make sure you don't call any long-blocking methods that do not + # release the Python's GIL! An example of this is calling a pybind11 + # extension function that is blocking / long-running, but is not + # doing a scoped release of the GIL. + def lease_worker(client, path, ttl, stop_event): + while True: + try: + client.refresh(path, ttl=ttl) + except etcd.EtcdKeyNotFound: + break + except ConnectionRefusedError: + # This error usually occurs during test when the server already got terminated but the + # python garbage collector have not yet invoked the __del__ method. + break + + if stop_event.wait(timeout=ttl / 2): + break + + lease_stop_event = threading.Event() + lease_thread = threading.Thread( + target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event) + ) + + lease_thread.daemon = True + lease_thread.start() + + return lease_stop_event + + def store_extra_data(self, rdzv_version, key, value): + node = self.get_path(f"/rdzv/v_{rdzv_version}/extra_data") + try: + # If first time we are storing anything: + extra_data = self.client.write( + key=node, value=json.dumps({key: value}), prevExist=False + ) + return + except etcd.EtcdAlreadyExist: + pass + + # CAS loop, to make sure we don't lose concurrent stores. + while True: + # We never delete extra_data. Failure here should be fatal, no special handling. 
+ extra_data = self.client.get(node) + + new_extra_data_value = json.loads(extra_data.value) + new_extra_data_value[key] = value + + try: + extra_data = self.client.test_and_set( + key=node, + value=json.dumps(new_extra_data_value), + prev_value=extra_data.value, + ) + return + except etcd.EtcdCompareFailed: + log.info("Store extra_data CAS unsuccessful, retrying") + time.sleep(0.1) + + def load_extra_data(self, rdzv_version, key, timeout=None): + # 'extra_data' node itself, and the directory it is located in: + node = self.get_path(f"/rdzv/v_{rdzv_version}/extra_data") + node_dir = self.get_path(f"/rdzv/v_{rdzv_version}") + + # TODO: implement timeout + # https://github.com/pytorch/elastic/issues/12 + while True: + # Combined wait for the node itself, and the key inside it. + root = self.client.get(node_dir) + + # Find the extra_data node, if it exists + extra_data = [n for n in root.children if n.key == node] + assert len(extra_data) <= 1 + + # Node for extra_data exists, check the desired key inside it. + if len(extra_data) == 1: + extra_data_dict = json.loads(extra_data[0].value) + if key in extra_data_dict: + return extra_data_dict[key] + + # The 'extra_data' node doesn't exist, or they key isn't published yet. + # Wait for interesting events on the extra_data node and retry. + try: + self.client.watch(node, index=root.etcd_index + 1) + except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut): + pass + + def setup_kv_store(self, rdzv_version): + store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv") + self.create_path_if_not_exists(store_path) + return EtcdStore(etcd_client=self.client, etcd_store_prefix=store_path) + + +def _create_etcd_client(params: RendezvousParameters) -> etcd.Client: + """Create a new ``etcd.Client`` from the specified ``RendezvousParameters``.""" + hostname, port = parse_rendezvous_endpoint(params.endpoint, 2379) + + # The communication protocol + protocol = params.config.get("protocol") + if protocol is None: + protocol = "http" + else: + if protocol != "http" and protocol != "https": + raise ValueError("The etcd protocol must be HTTP or HTTPS.") + + # The SSL client certificate + ssl_cert = params.config.get("cert") + if ssl_cert is not None: + cert_key = params.config.get("key") + if cert_key is not None: + # The etcd client expects the certificate key as the second element + # of the `cert` tuple. 
+ ssl_cert = (ssl_cert, cert_key) + + # The root certificate + ca_cert = params.config.get("cacert") + + return etcd.Client( + hostname, + port, + protocol=protocol, + cert=ssl_cert, + ca_cert=ca_cert, + allow_reconnect=True, + ) + + +# Handler for torch.distributed "static" registration +def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler: + """ + Usage: + + :: + + rdzv_params = RendezvousParameters( + backend="etcd", + endpoint="192.168.0.42:2379", + run_id="123", + min_nodes=4, + max_nodes=8, + timeout=300, + last_call_timeout=30, + etcd_prefix="custom_prefix", + protocol="https", + cacert="/etc/kubernetes/certs/ca.crt", + cert="/etc/kubernetes/certs/client.crt", + key="/etc/kubernetes/certs/client.key") + # -- or -- + rdzv_params = RendezvousParameters( + backend="etcd", + endpoint="192.168.0.42:2379", + run_id="123", + min_nodes=4, + max_nodes=8) + + etcd_rdzv_handler = create_etcd_rendezvous_handler(rdzv_params) + + + Where: + run_id - unique id for this training job instance, + min_nodes - min number of workers expected to join the rendezvous, + max_nodes - max number of workers allowed to join the rendezvous, + defaults to min_workers is not specified. + timeout - total timeout within which next_rendezvous is expected to + succeed; a RendezvousTimeoutError is raised otherwise; + Defaults is 600 (10 minutes). + last_call_timeout - additional wait amount ("last call") after + min number of workers has been reached. + Defaults to 30 seconds. + etcd_prefix - path prefix (from etcd root), inside which all + etcd nodes will be created. + Default is "/torchelastic/p2p". + protocol - http (default) or https to access etcd. + cacert - CA cert to access etcd, only makes sense with https. + cert - client cert to access etcd, only makes sense with https. + key - client key to access etcd, only makes sense with https. + """ + client = _create_etcd_client(params) + + etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p") + + rdzv = EtcdRendezvous( + client=client, + prefix=etcd_prefix, + run_id=params.run_id, + num_min_workers=params.min_nodes, + num_max_workers=params.max_nodes, + timeout=params.get_as_int("timeout", _DEFAULT_TIMEOUT), + last_call_timeout=params.get_as_int("last_call_timeout", _DEFAULT_LAST_CALL_TIMEOUT), + ) + return EtcdRendezvousHandler(rdzv_impl=rdzv) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..cacb888590f88567a2cd094328916389de6302d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py @@ -0,0 +1,213 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
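Editor's note: the ``create_rdzv_handler`` factory above is normally reached through the rendezvous handler registry (the ``"etcd"`` backend registered by the registry module later in this patch) rather than called directly. A minimal sketch, assuming a reachable etcd server at the hypothetical endpoint below and that the package has registered its default handlers::

    from torch.distributed.elastic.rendezvous import RendezvousParameters
    from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler

    params = RendezvousParameters(
        backend="etcd",
        endpoint="10.0.0.5:2379",   # hypothetical etcd endpoint
        run_id="demo-job",          # hypothetical run id
        min_nodes=2,
        max_nodes=4,
    )

    handler = get_rendezvous_handler(params)
    # As documented above, next_rendezvous() returns the c10d store,
    # this worker's rank, and the world size for this rendezvous round.
    store, rank, world_size = handler.next_rendezvous()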
+ +import binascii +from base64 import b64decode, b64encode +from typing import Optional, Tuple, cast + +import urllib3.exceptions # type: ignore[import] +from etcd import Client as EtcdClient # type: ignore[import] +from etcd import ( + EtcdAlreadyExist, + EtcdCompareFailed, + EtcdException, + EtcdKeyNotFound, + EtcdResult, +) +from torch.distributed import Store + +from .api import RendezvousConnectionError, RendezvousParameters, RendezvousStateError +from .dynamic_rendezvous import RendezvousBackend, Token +from .etcd_store import EtcdStore +from .utils import parse_rendezvous_endpoint + + +class EtcdRendezvousBackend(RendezvousBackend): + """Represents an etcd-based rendezvous backend. + + Args: + client: + The ``etcd.Client`` instance to use to communicate with etcd. + run_id: + The run id of the rendezvous. + key_prefix: + The path under which to store the rendezvous state in etcd. + ttl: + The TTL of the rendezvous state. If not specified, defaults to two hours. + """ + + _DEFAULT_TTL = 7200 # 2 hours + + _client: EtcdClient + _key: str + _ttl: int + + def __init__( + self, + client: EtcdClient, + run_id: str, + key_prefix: Optional[str] = None, + ttl: Optional[int] = None, + ) -> None: + if not run_id: + raise ValueError("The run id must be a non-empty string.") + + self._client = client + + if key_prefix: + self._key = key_prefix + "/" + run_id + else: + self._key = run_id + + if ttl and ttl > 0: + self._ttl = ttl + else: + self._ttl = self._DEFAULT_TTL + + @property + def name(self) -> str: + """See base class.""" + return "etcd-v2" + + def get_state(self) -> Optional[Tuple[bytes, Token]]: + """See base class.""" + try: + result = self._client.read(self._key) + except EtcdKeyNotFound: + return None + except (EtcdException, urllib3.exceptions.TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to etcd has failed. See inner exception for details." + ) from exc + + return self._decode_state(result) + + def set_state( + self, state: bytes, token: Optional[Token] = None + ) -> Optional[Tuple[bytes, Token, bool]]: + """See base class.""" + base64_state = b64encode(state).decode() + + kwargs = {} + + def get_state(): + result = self.get_state() + if result is not None: + tmp = *result, False + # Python 3.6 does not support tuple unpacking in return + # statements. + return tmp + return None + + if token: + try: + token = int(token) + except ValueError: + return get_state() + + if token: + kwargs["prevIndex"] = token + else: + kwargs["prevExist"] = False + + try: + result = self._client.write(self._key, base64_state, self._ttl, **kwargs) + except (EtcdAlreadyExist, EtcdCompareFailed): + result = None + except (EtcdException, urllib3.exceptions.TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to etcd has failed. See inner exception for details." + ) from exc + + if result is None: + return get_state() + + tmp = *self._decode_state(result), True + return tmp + + def _decode_state(self, result: EtcdResult) -> Tuple[bytes, Token]: + base64_state = result.value.encode() + + try: + state = b64decode(base64_state) + except binascii.Error as exc: + raise RendezvousStateError( + "The state object is corrupt. See inner exception for details." 
+ ) from exc + + return state, result.modifiedIndex + + +def _create_etcd_client(params: RendezvousParameters) -> EtcdClient: + host, port = parse_rendezvous_endpoint(params.endpoint, default_port=2379) + + # The timeout + read_timeout = cast(int, params.get_as_int("read_timeout", 60)) + if read_timeout <= 0: + raise ValueError("The read timeout must be a positive integer.") + + # The communication protocol + protocol = params.get("protocol", "http").strip().lower() + if protocol != "http" and protocol != "https": + raise ValueError("The protocol must be HTTP or HTTPS.") + + # The SSL client certificate + ssl_cert = params.get("ssl_cert") + if ssl_cert: + ssl_cert_key = params.get("ssl_cert_key") + if ssl_cert_key: + # The etcd client expects the certificate key as the second element + # of the `cert` tuple. + ssl_cert = (ssl_cert, ssl_cert_key) + + # The root certificate + ca_cert = params.get("ca_cert") + + try: + return EtcdClient( + host, + port, + read_timeout=read_timeout, + protocol=protocol, + cert=ssl_cert, + ca_cert=ca_cert, + allow_reconnect=True, + ) + except (EtcdException, urllib3.exceptions.TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to etcd has failed. See inner exception for details." + ) from exc + + +def create_backend(params: RendezvousParameters) -> Tuple[EtcdRendezvousBackend, Store]: + """Create a new :py:class:`EtcdRendezvousBackend` from the specified parameters. + + +--------------+-----------------------------------------------------------+ + | Parameter | Description | + +==============+===========================================================+ + | read_timeout | The read timeout, in seconds, for etcd operations. | + | | Defaults to 60 seconds. | + +--------------+-----------------------------------------------------------+ + | protocol | The protocol to use to communicate with etcd. Valid | + | | values are "http" and "https". Defaults to "http". | + +--------------+-----------------------------------------------------------+ + | ssl_cert | The path to the SSL client certificate to use along with | + | | HTTPS. Defaults to ``None``. | + +--------------+-----------------------------------------------------------+ + | ssl_cert_key | The path to the private key of the SSL client certificate | + | | to use along with HTTPS. Defaults to ``None``. | + +--------------+-----------------------------------------------------------+ + | ca_cert | The path to the rool SSL authority certificate. Defaults | + | | to ``None``. | + +--------------+-----------------------------------------------------------+ + """ + client = _create_etcd_client(params) + + backend = EtcdRendezvousBackend(client, params.run_id, key_prefix="/torch/elastic/rendezvous") + + store = EtcdStore(client, "/torch/elastic/store") + + return backend, store diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_server.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_server.py new file mode 100644 index 0000000000000000000000000000000000000000..a220f2ca5917b01af9818e5b38efe00d8fcfdf7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_server.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
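Editor's note: ``create_backend`` above pairs with ``create_handler`` from ``dynamic_rendezvous``, which is what the ``etcd-v2`` registry entry later in this patch does. A minimal sketch; the endpoint, run id, and certificate paths are illustrative placeholders, and the TLS options are only meaningful with ``protocol="https"``::

    from torch.distributed.elastic.rendezvous import RendezvousParameters
    from torch.distributed.elastic.rendezvous.dynamic_rendezvous import create_handler
    from torch.distributed.elastic.rendezvous.etcd_rendezvous_backend import create_backend

    params = RendezvousParameters(
        backend="etcd-v2",
        endpoint="10.0.0.5:2379",           # hypothetical etcd endpoint
        run_id="demo-job",                  # hypothetical run id
        min_nodes=2,
        max_nodes=4,
        read_timeout=60,
        protocol="https",
        ssl_cert="/etc/ssl/client.crt",     # hypothetical certificate paths
        ssl_cert_key="/etc/ssl/client.key",
        ca_cert="/etc/ssl/ca.crt",
    )

    backend, store = create_backend(params)
    handler = create_handler(store, backend, params)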
+import atexit +import logging +import os +import shlex +import shutil +import socket +import subprocess +import tempfile +import time +from typing import Optional, TextIO, Union + +try: + import etcd # type: ignore[import] +except ModuleNotFoundError: + pass + + +log = logging.getLogger(__name__) + + +def find_free_port(): + """ + Find a free port and binds a temporary socket to it so that the port can be "reserved" until used. + + .. note:: the returned socket must be closed before using the port, + otherwise a ``address already in use`` error will happen. + The socket should be held and closed as close to the + consumer of the port as possible since otherwise, there + is a greater chance of race-condition where a different + process may see the port as being free and take it. + + Returns: a socket binded to the reserved free port + + Usage:: + + sock = find_free_port() + port = sock.getsockname()[1] + sock.close() + use_port(port) + """ + addrs = socket.getaddrinfo( + host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM + ) + + for addr in addrs: + family, type, proto, _, _ = addr + try: + s = socket.socket(family, type, proto) + s.bind(("localhost", 0)) + s.listen(0) + return s + except OSError as e: + s.close() # type: ignore[possibly-undefined] + print(f"Socket creation attempt failed: {e}") + raise RuntimeError("Failed to create a socket") + + +def stop_etcd(subprocess, data_dir: Optional[str] = None): + if subprocess and subprocess.poll() is None: + log.info("stopping etcd server") + subprocess.terminate() + subprocess.wait() + + if data_dir: + log.info("deleting etcd data dir: %s", data_dir) + shutil.rmtree(data_dir, ignore_errors=True) + + +class EtcdServer: + """ + .. note:: tested on etcd server v3.4.3. + + Starts and stops a local standalone etcd server on a random free + port. Useful for single node, multi-worker launches or testing, + where a sidecar etcd server is more convenient than having to + separately setup an etcd server. + + This class registers a termination handler to shutdown the etcd + subprocess on exit. This termination handler is NOT a substitute for + calling the ``stop()`` method. + + The following fallback mechanism is used to find the etcd binary: + + 1. Uses env var TORCHELASTIC_ETCD_BINARY_PATH + 2. Uses ``/bin/etcd`` if one exists + 3. Uses ``etcd`` from ``PATH`` + + Usage + :: + + server = EtcdServer("/usr/bin/etcd", 2379, "/tmp/default.etcd") + server.start() + client = server.get_client() + # use client + server.stop() + + Args: + etcd_binary_path: path of etcd server binary (see above for fallback path) + """ + + def __init__(self, data_dir: Optional[str] = None): + self._port = -1 + self._host = "localhost" + + root = os.path.dirname(__file__) + default_etcd_bin = os.path.join(root, "bin/etcd") + self._etcd_binary_path = os.environ.get( + "TORCHELASTIC_ETCD_BINARY_PATH", default_etcd_bin + ) + if not os.path.isfile(self._etcd_binary_path): + self._etcd_binary_path = "etcd" + + self._base_data_dir = ( + data_dir if data_dir else tempfile.mkdtemp(prefix="torchelastic_etcd_data") + ) + self._etcd_cmd = None + self._etcd_proc: Optional[subprocess.Popen] = None + + def _get_etcd_server_process(self) -> subprocess.Popen: + if not self._etcd_proc: + raise RuntimeError( + "No etcd server process started. 
Call etcd_server.start() first" + ) + else: + return self._etcd_proc + + def get_port(self) -> int: + """Return the port the server is running on.""" + return self._port + + def get_host(self) -> str: + """Return the host the server is running on.""" + return self._host + + def get_endpoint(self) -> str: + """Return the etcd server endpoint (host:port).""" + return f"{self._host}:{self._port}" + + def start( + self, + timeout: int = 60, + num_retries: int = 3, + stderr: Union[int, TextIO, None] = None, + ) -> None: + """ + Start the server, and waits for it to be ready. When this function returns the sever is ready to take requests. + + Args: + timeout: time (in seconds) to wait for the server to be ready + before giving up. + num_retries: number of retries to start the server. Each retry + will wait for max ``timeout`` before considering it as failed. + stderr: the standard error file handle. Valid values are + `subprocess.PIPE`, `subprocess.DEVNULL`, an existing file + descriptor (a positive integer), an existing file object, and + `None`. + + Raises: + TimeoutError: if the server is not ready within the specified timeout + """ + curr_retries = 0 + while True: + try: + data_dir = os.path.join(self._base_data_dir, str(curr_retries)) + os.makedirs(data_dir, exist_ok=True) + return self._start(data_dir, timeout, stderr) + except Exception as e: + curr_retries += 1 + stop_etcd(self._etcd_proc) + log.warning( + "Failed to start etcd server, got error: %s, retrying", str(e) + ) + if curr_retries >= num_retries: + shutil.rmtree(self._base_data_dir, ignore_errors=True) + raise + atexit.register(stop_etcd, self._etcd_proc, self._base_data_dir) + + def _start( + self, data_dir: str, timeout: int = 60, stderr: Union[int, TextIO, None] = None + ) -> None: + sock = find_free_port() + sock_peer = find_free_port() + self._port = sock.getsockname()[1] + peer_port = sock_peer.getsockname()[1] + + etcd_cmd = shlex.split( + " ".join( + [ + self._etcd_binary_path, + "--enable-v2", + "--data-dir", + data_dir, + "--listen-client-urls", + f"http://{self._host}:{self._port}", + "--advertise-client-urls", + f"http://{self._host}:{self._port}", + "--listen-peer-urls", + f"http://{self._host}:{peer_port}", + ] + ) + ) + + log.info("Starting etcd server: [%s]", etcd_cmd) + + sock.close() + sock_peer.close() + self._etcd_proc = subprocess.Popen(etcd_cmd, close_fds=True, stderr=stderr) + self._wait_for_ready(timeout) + + def get_client(self): + """Return an etcd client object that can be used to make requests to this server.""" + return etcd.Client( + host=self._host, port=self._port, version_prefix="/v2", read_timeout=10 + ) + + def _wait_for_ready(self, timeout: int = 60) -> None: + client = etcd.Client( + host=f"{self._host}", port=self._port, version_prefix="/v2", read_timeout=5 + ) + max_time = time.time() + timeout + + while time.time() < max_time: + if self._get_etcd_server_process().poll() is not None: + # etcd server process finished + exitcode = self._get_etcd_server_process().returncode + raise RuntimeError( + f"Etcd server process exited with the code: {exitcode}" + ) + try: + log.info("etcd server ready. version: %s", client.version) + return + except Exception: + time.sleep(1) + raise TimeoutError("Timed out waiting for etcd server to be ready!") + + def stop(self) -> None: + """Stop the server and cleans up auto generated resources (e.g. 
data dir).""" + log.info("EtcdServer stop method called") + stop_etcd(self._etcd_proc, self._base_data_dir) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_store.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_store.py new file mode 100644 index 0000000000000000000000000000000000000000..7690439237add63999cbbb0b5179043036d5b9c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_store.py @@ -0,0 +1,204 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import datetime +import random +import time +from base64 import b64decode, b64encode +from typing import Optional + +import etcd # type: ignore[import] + +# pyre-ignore[21]: Could not find name `Store` in `torch.distributed`. +from torch.distributed import Store + + +# Delay (sleep) for a small random amount to reduce CAS failures. +# This does not affect correctness, but will reduce requests to etcd server. +def cas_delay(): + time.sleep(random.uniform(0, 0.1)) + + +# pyre-fixme[11]: Annotation `Store` is not defined as a type. +class EtcdStore(Store): + """ + Implement a c10 Store interface by piggybacking on the rendezvous etcd instance. + + This is the store object returned by ``EtcdRendezvous``. + """ + + def __init__( + self, + etcd_client, + etcd_store_prefix, + # Default timeout same as in c10d/Store.hpp + timeout: Optional[datetime.timedelta] = None, + ): + super().__init__() # required for pybind trampoline. + + self.client = etcd_client + self.prefix = etcd_store_prefix + + if timeout is not None: + self.set_timeout(timeout) + + if not self.prefix.endswith("/"): + self.prefix += "/" + + def set(self, key, value): + """ + Write a key/value pair into ``EtcdStore``. + + Both key and value may be either Python ``str`` or ``bytes``. + """ + self.client.set(key=self.prefix + self._encode(key), value=self._encode(value)) + + def get(self, key) -> bytes: + """ + Get a value by key, possibly doing a blocking wait. + + If key is not immediately present, will do a blocking wait + for at most ``timeout`` duration or until the key is published. + + + Returns: + value ``(bytes)`` + + Raises: + LookupError - If key still not published after timeout + """ + b64_key = self.prefix + self._encode(key) + kvs = self._try_wait_get([b64_key]) + + if kvs is None: + raise LookupError(f"Key {key} not found in EtcdStore") + + return self._decode(kvs[b64_key]) + + def add(self, key, num: int) -> int: + """ + Atomically increment a value by an integer amount. + + The integer is represented as a string using base 10. If key is not present, + a default value of ``0`` will be assumed. + + Returns: + the new (incremented) value + + + """ + b64_key = self._encode(key) + # c10d Store assumes value is an integer represented as a decimal string + try: + # Assume default value "0", if this key didn't yet: + node = self.client.write( + key=self.prefix + b64_key, + value=self._encode(str(num)), # i.e. 0 + num + prevExist=False, + ) + return int(self._decode(node.value)) + except etcd.EtcdAlreadyExist: + pass + + while True: + # Note: c10d Store does not have a method to delete keys, so we + # can be sure it's still there. 
+ node = self.client.get(key=self.prefix + b64_key) + new_value = self._encode(str(int(self._decode(node.value)) + num)) + try: + node = self.client.test_and_set( + key=node.key, value=new_value, prev_value=node.value + ) + return int(self._decode(node.value)) + except etcd.EtcdCompareFailed: + cas_delay() + + def wait(self, keys, override_timeout: Optional[datetime.timedelta] = None): + """ + Wait until all of the keys are published, or until timeout. + + Raises: + LookupError - if timeout occurs + """ + b64_keys = [self.prefix + self._encode(key) for key in keys] + kvs = self._try_wait_get(b64_keys, override_timeout) + if kvs is None: + raise LookupError("Timeout while waiting for keys in EtcdStore") + # No return value on success + + def check(self, keys) -> bool: + """Check if all of the keys are immediately present (without waiting).""" + b64_keys = [self.prefix + self._encode(key) for key in keys] + kvs = self._try_wait_get( + b64_keys, + override_timeout=datetime.timedelta(microseconds=1), # as if no wait + ) + return kvs is not None + + # + # Encode key/value data in base64, so we can store arbitrary binary data + # in EtcdStore. Input can be `str` or `bytes`. + # In case of `str`, utf-8 encoding is assumed. + # + def _encode(self, value) -> str: + if type(value) == bytes: + return b64encode(value).decode() + elif type(value) == str: + return b64encode(value.encode()).decode() + raise ValueError("Value must be of type str or bytes") + + # + # Decode a base64 string (of type `str` or `bytes`). + # Return type is `bytes`, which is more convenient with the Store interface. + # + def _decode(self, value) -> bytes: + if type(value) == bytes: + return b64decode(value) + elif type(value) == str: + return b64decode(value.encode()) + raise ValueError("Value must be of type str or bytes") + + # + # Get all of the (base64-encoded) etcd keys at once, or wait until all the keys + # are published or timeout occurs. + # This is a helper method for the public interface methods. + # + # On success, a dictionary of {etcd key -> etcd value} is returned. + # On timeout, None is returned. + # + def _try_wait_get(self, b64_keys, override_timeout=None): + timeout = self.timeout if override_timeout is None else override_timeout # type: ignore[attr-defined] + deadline = time.time() + timeout.total_seconds() + + while True: + # Read whole directory (of keys), filter only the ones waited for + all_nodes = self.client.get(key=self.prefix) + req_nodes = { + node.key: node.value for node in all_nodes.children if node.key in b64_keys + } + + if len(req_nodes) == len(b64_keys): + # All keys are available + return req_nodes + + watch_timeout = deadline - time.time() + if watch_timeout <= 0: + return None + + try: + self.client.watch( + key=self.prefix, + recursive=True, + timeout=watch_timeout, + index=all_nodes.etcd_index + 1, + ) + except etcd.EtcdWatchTimedOut: + if time.time() >= deadline: + return None + else: + continue + except etcd.EtcdEventIndexCleared: + continue diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/registry.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..eaa5bcfd80e247fcb8497999d2350005dd8134a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/registry.py @@ -0,0 +1,66 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from .api import RendezvousHandler, RendezvousParameters +from .api import rendezvous_handler_registry as handler_registry +from .dynamic_rendezvous import create_handler + +__all__ = ['get_rendezvous_handler'] + +def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler: + from . import static_tcp_rendezvous + + return static_tcp_rendezvous.create_rdzv_handler(params) + + +def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler: + from . import etcd_rendezvous + + return etcd_rendezvous.create_rdzv_handler(params) + + +def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler: + from .etcd_rendezvous_backend import create_backend + + backend, store = create_backend(params) + + return create_handler(store, backend, params) + + +def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler: + from .c10d_rendezvous_backend import create_backend + + backend, store = create_backend(params) + + return create_handler(store, backend, params) + + +def _register_default_handlers() -> None: + handler_registry.register("etcd", _create_etcd_handler) + handler_registry.register("etcd-v2", _create_etcd_v2_handler) + handler_registry.register("c10d", _create_c10d_handler) + handler_registry.register("static", _create_static_handler) + + +def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler: + """ + Obtain a reference to a :py:class`RendezvousHandler`. + + Custom rendezvous handlers can be registered by + + :: + + from torch.distributed.elastic.rendezvous import rendezvous_handler_registry + from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler + + def create_my_rdzv(params: RendezvousParameters): + return MyCustomRdzv(params) + + rendezvous_handler_registry.register("my_rdzv_backend_name", create_my_rdzv) + + my_rdzv_handler = get_rendezvous_handler("my_rdzv_backend_name", RendezvousParameters) + """ + return handler_registry.create_handler(params) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py new file mode 100644 index 0000000000000000000000000000000000000000..fe600213bcc89d34d2cba4021ff6989a7bad96ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import datetime +import logging +from typing import Tuple, cast, Optional + +# pyre-ignore[21]: Could not find name `Store` in `torch.distributed`. +from torch.distributed import Store, TCPStore, PrefixStore +from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters +from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint + +log = logging.getLogger(__name__) + +_default_timeout_seconds = 600 + + +class StaticTCPRendezvous(RendezvousHandler): + """ + Static rendezvous that is a wrapper around the TCPStore. 
+ + Creates TCPStore based on the input parameters with the + listener on the agent with group_rank=0 + """ + + def __init__( + self, + master_addr: str, + master_port: int, + rank: int, + world_size: int, + run_id: str, + timeout: int, + ): + self.master_addr = master_addr + self.master_port = master_port + self.rank = rank + self.world_size = world_size + self.run_id = run_id + self.timeout = datetime.timedelta(seconds=timeout) + self._store: Optional[Store] = None + + def get_backend(self) -> str: + return "static" + + def next_rendezvous(self) -> Tuple[Store, int, int]: + log.info("Creating TCPStore as the c10d::Store implementation") + if not self._store: + is_master = self.rank == 0 + self._store = TCPStore( # type: ignore[call-arg] + self.master_addr, + self.master_port, + self.world_size, + is_master, + self.timeout, + multi_tenant=True, + ) + store = PrefixStore(self.run_id, self._store) + return store, self.rank, self.world_size + + def is_closed(self): + return False + + def set_closed(self): + pass + + def num_nodes_waiting(self): + return 0 + + def get_run_id(self) -> str: + return self.run_id + + def shutdown(self) -> bool: + return True + + +def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler: + if "rank" not in params.config: + raise ValueError( + "rank is absent in RendezvousParameters." + "Try add --node-rank to the cmd request" + ) + endpoint = params.endpoint.strip() + if not endpoint: + raise ValueError( + "endpoint is absent in RendezvousParameters" + "Try add --master-port and --master-addr to the cmd request" + ) + master_addr, master_port = parse_rendezvous_endpoint(endpoint, -1) + if master_port == -1: + raise ValueError( + f"Port is absent in endpoint: {endpoint}. Try launching with --master-port" + ) + world_size = params.max_nodes + rank = cast(int, params.config.get("rank")) + run_id = params.run_id + if "timeout" in params.config: + timeout = int(params.config["timeout"]) + else: + timeout = _default_timeout_seconds + return StaticTCPRendezvous( + master_addr, master_port, rank, world_size, run_id, timeout + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/utils.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..326bc604a91444f14416954cf376718fe613f9db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/utils.py @@ -0,0 +1,279 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import ipaddress +import random +import re +import socket +import time +import weakref +from datetime import timedelta +from threading import Event, Thread +from typing import Any, Callable, Dict, Optional, Tuple, Union + +__all__ = ['parse_rendezvous_endpoint'] + +def _parse_rendezvous_config(config_str: str) -> Dict[str, str]: + """Extract key-value pairs from a rendezvous configuration string. + + Args: + config_str: + A string in format =,...,=. + """ + config: Dict[str, str] = {} + + config_str = config_str.strip() + if not config_str: + return config + + key_values = config_str.split(",") + for kv in key_values: + key, *values = kv.split("=", 1) + + key = key.strip() + if not key: + raise ValueError( + "The rendezvous configuration string must be in format " + "=,...,=." 
+ ) + + value: Optional[str] + if values: + value = values[0].strip() + else: + value = None + if not value: + raise ValueError( + f"The rendezvous configuration option '{key}' must have a value specified." + ) + + config[key] = value + return config + + +def _try_parse_port(port_str: str) -> Optional[int]: + """Try to extract the port number from ``port_str``.""" + if port_str and re.match(r"^[0-9]{1,5}$", port_str): + return int(port_str) + return None + + +def parse_rendezvous_endpoint(endpoint: Optional[str], default_port: int) -> Tuple[str, int]: + """Extract the hostname and the port number from a rendezvous endpoint. + + Args: + endpoint: + A string in format [:]. + default_port: + The port number to use if the endpoint does not include one. + + Returns: + A tuple of hostname and port number. + """ + if endpoint is not None: + endpoint = endpoint.strip() + + if not endpoint: + return ("localhost", default_port) + + # An endpoint that starts and ends with brackets represents an IPv6 address. + if endpoint[0] == "[" and endpoint[-1] == "]": + host, *rest = endpoint, *[] + else: + host, *rest = endpoint.rsplit(":", 1) + + # Sanitize the IPv6 address. + if len(host) > 1 and host[0] == "[" and host[-1] == "]": + host = host[1:-1] + + if len(rest) == 1: + port = _try_parse_port(rest[0]) + if port is None or port >= 2 ** 16: + raise ValueError( + f"The port number of the rendezvous endpoint '{endpoint}' must be an integer " + "between 0 and 65536." + ) + else: + port = default_port + + if not re.match(r"^[\w\.:-]+$", host): + raise ValueError( + f"The hostname of the rendezvous endpoint '{endpoint}' must be a dot-separated list of " + "labels, an IPv4 address, or an IPv6 address." + ) + + return host, port + + +def _matches_machine_hostname(host: str) -> bool: + """Indicate whether ``host`` matches the hostname of this machine. + + This function compares ``host`` to the hostname as well as to the IP + addresses of this machine. Note that it may return a false negative if this + machine has CNAME records beyond its FQDN or IP addresses assigned to + secondary NICs. + """ + if host == "localhost": + return True + + try: + addr = ipaddress.ip_address(host) + except ValueError: + addr = None + + if addr and addr.is_loopback: + return True + + try: + host_addr_list = socket.getaddrinfo( + host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME + ) + except (ValueError, socket.gaierror) as _: + host_addr_list = [] + + host_ip_list = [ + host_addr_info[4][0] + for host_addr_info in host_addr_list + ] + + this_host = socket.gethostname() + if host == this_host: + return True + + addr_list = socket.getaddrinfo( + this_host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME + ) + for addr_info in addr_list: + # If we have an FQDN in the addr_info, compare it to `host`. + if addr_info[3] and addr_info[3] == host: + return True + + # Otherwise if `host` represents an IP address, compare it to our IP + # address. + if addr and addr_info[4][0] == str(addr): + return True + + # If the IP address matches one of the provided host's IP addresses + if addr_info[4][0] in host_ip_list: + return True + + return False + + +def _delay(seconds: Union[float, Tuple[float, float]]) -> None: + """Suspend the current thread for ``seconds``. + + Args: + seconds: + Either the delay, in seconds, or a tuple of a lower and an upper + bound within which a random delay will be picked. 
+ """ + if isinstance(seconds, tuple): + seconds = random.uniform(*seconds) + # Ignore delay requests that are less than 10 milliseconds. + if seconds >= 0.01: + time.sleep(seconds) + + +class _PeriodicTimer: + """Represent a timer that periodically runs a specified function. + + Args: + interval: + The interval, in seconds, between each run. + function: + The function to run. + """ + + # The state of the timer is hold in a separate context object to avoid a + # reference cycle between the timer and the background thread. + class _Context: + interval: float + function: Callable[..., None] + args: Tuple[Any, ...] + kwargs: Dict[str, Any] + stop_event: Event + + _name: Optional[str] + _thread: Optional[Thread] + _finalizer: Optional[weakref.finalize] + + # The context that is shared between the timer and the background thread. + _ctx: _Context + + def __init__( + self, + interval: timedelta, + function: Callable[..., None], + *args: Any, + **kwargs: Any, + ) -> None: + self._name = None + + self._ctx = self._Context() + self._ctx.interval = interval.total_seconds() + self._ctx.function = function # type: ignore[assignment] + self._ctx.args = args or () + self._ctx.kwargs = kwargs or {} + self._ctx.stop_event = Event() + + self._thread = None + self._finalizer = None + + @property + def name(self) -> Optional[str]: + """Get the name of the timer.""" + return self._name + + def set_name(self, name: str) -> None: + """Set the name of the timer. + + The specified name will be assigned to the background thread and serves + for debugging and troubleshooting purposes. + """ + if self._thread: + raise RuntimeError("The timer has already started.") + + self._name = name + + def start(self) -> None: + """Start the timer.""" + if self._thread: + raise RuntimeError("The timer has already started.") + + self._thread = Thread( + target=self._run, name=self._name or "PeriodicTimer", args=(self._ctx,), daemon=True + ) + + # We avoid using a regular finalizer (a.k.a. __del__) for stopping the + # timer as joining a daemon thread during the interpreter shutdown can + # cause deadlocks. The weakref.finalize is a superior alternative that + # provides a consistent behavior regardless of the GC implementation. + self._finalizer = weakref.finalize( + self, self._stop_thread, self._thread, self._ctx.stop_event + ) + + # We do not attempt to stop our background thread during the interpreter + # shutdown. At that point we do not even know whether it still exists. + self._finalizer.atexit = False + + self._thread.start() + + def cancel(self) -> None: + """Stop the timer at the next opportunity.""" + if self._finalizer: + self._finalizer() + + @staticmethod + def _run(ctx) -> None: + while not ctx.stop_event.wait(ctx.interval): + ctx.function(*ctx.args, **ctx.kwargs) + + @staticmethod + def _stop_thread(thread, stop_event): + stop_event.set() + + thread.join() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4b2a46c4231dcec6f2b99af677b6979083b4b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py @@ -0,0 +1,44 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +""" +Expiration timers are set up on the same process as the agent and +used from your script to deal with stuck workers. When you go into +a code-block that has the potential to get stuck you can acquire +an expiration timer, which instructs the timer server to kill the +process if it does not release the timer by the self-imposed expiration +deadline. + +Usage:: + + import torchelastic.timer as timer + import torchelastic.agent.server as agent + + def main(): + start_method = "spawn" + message_queue = mp.get_context(start_method).Queue() + server = timer.LocalTimerServer(message, max_interval=0.01) + server.start() # non-blocking + + spec = WorkerSpec( + fn=trainer_func, + args=(message_queue,), + ...) + agent = agent.LocalElasticAgent(spec, start_method) + agent.run() + + def trainer_func(message_queue): + timer.configure(timer.LocalTimerClient(message_queue)) + with timer.expires(after=60): # 60 second expiry + # do some work + +In the example above if ``trainer_func`` takes more than 60 seconds to +complete, then the worker process is killed and the agent retries the worker group. +""" + +from .api import TimerClient, TimerRequest, TimerServer, configure, expires # noqa: F401 +from .local_timer import LocalTimerClient, LocalTimerServer # noqa: F401 +from .file_based_local_timer import FileTimerClient, FileTimerServer, FileTimerRequest # noqa: F401 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2bbf5bbe2348bb0eaa411a034710dd14f7648e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__init__.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +from .api import get_env_variable_or_raise, get_socket_with_port, macros # noqa: F401 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05e209ad4609f3f4f38b73d7400216b01b00d47f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1efa29847065284e9836b9036c1258fb683f70fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/distributed.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00d2bfb61549c6e849d70f3028fb1a9570cd4f26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/distributed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/log_level.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/log_level.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82f7a7d8a1e4b7de8eb47c5e623442a57aef64d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/log_level.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..987850945b04aba5232c6fda29d1518e45d95afe Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/store.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/store.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3485c82e6c0fc0b34e6dc7450b7f76ba0ab9e6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/store.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/api.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/api.py new file mode 100644 index 0000000000000000000000000000000000000000..e0607e9c0d5dc725a38083a9a4f1cc24feed3b14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/api.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
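The re-export above makes the socket helper available directly from ``torch.distributed.elastic.utils``. A small sketch of the reserve-a-port pattern; note the inherent race, since another process may grab the port after the socket is closed::

    from contextlib import closing

    from torch.distributed.elastic.utils import get_socket_with_port

    sock = get_socket_with_port()
    with closing(sock):
        port = sock.getsockname()[1]
    # `port` was free at bind time; hand it off promptly to whatever needs it.
    print(f"reserved ephemeral port: {port}")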
+ +import os +import socket +from string import Template +from typing import List, Any + + +def get_env_variable_or_raise(env_name: str) -> str: + r""" + Tries to retrieve environment variable. Raises ``ValueError`` + if no environment variable found. + + Args: + env_name (str): Name of the env variable + """ + value = os.environ.get(env_name, None) + if value is None: + msg = f"Environment variable {env_name} expected, but not set" + raise ValueError(msg) + return value + + +def get_socket_with_port() -> socket.socket: + addrs = socket.getaddrinfo( + host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM + ) + for addr in addrs: + family, type, proto, _, _ = addr + s = socket.socket(family, type, proto) + try: + s.bind(("localhost", 0)) + s.listen(0) + return s + except OSError as e: + s.close() + raise RuntimeError("Failed to create a socket") + + +class macros: + """ + Defines simple macros for caffe2.distributed.launch cmd args substitution + """ + + local_rank = "${local_rank}" + + @staticmethod + def substitute(args: List[Any], local_rank: str) -> List[str]: + args_sub = [] + for arg in args: + if isinstance(arg, str): + sub = Template(arg).safe_substitute(local_rank=local_rank) + args_sub.append(sub) + else: + args_sub.append(arg) + return args_sub diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c39bca6f3c8a31f5f2d7115ad12c1fc4925fe1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
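A short usage sketch for the helpers defined above; the environment variable and argument values are made up for illustration, only the call signatures come from ``torch.distributed.elastic.utils.api``::

    import os

    from torch.distributed.elastic.utils.api import get_env_variable_or_raise, macros

    os.environ["MASTER_ADDR"] = "node0"
    print(get_env_variable_or_raise("MASTER_ADDR"))  # -> node0

    # macros.substitute only rewrites string arguments; other types pass through.
    args = ["--local-rank", macros.local_rank, 42]
    print(macros.substitute(args, "3"))              # -> ['--local-rank', '3', 42]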
+ +from .cycling_iterator import CyclingIterator # noqa: F401 +from .elastic_distributed_sampler import ElasticDistributedSampler # noqa: F401 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b94a9e32fcf6e9faa68a8dc6be39d5b57bd8553 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/cycling_iterator.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/cycling_iterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd2d9a7d266550a4a0014cd0d7fb98ad17bdedd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/cycling_iterator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/elastic_distributed_sampler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/elastic_distributed_sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716f6849ed4fb49fa196ec2150e00fd28c444745 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/elastic_distributed_sampler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/cycling_iterator.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/cycling_iterator.py new file mode 100644 index 0000000000000000000000000000000000000000..60a5861f7befdebd5ed80e97151e267ce3523945 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/cycling_iterator.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +class CyclingIterator: + """ + An iterator decorator that cycles through the + underlying iterator "n" times. Useful to "unroll" + the dataset across multiple training epochs. 
+ + The generator function is called as ``generator_fn(epoch)`` + to obtain the underlying iterator, where ``epoch`` is a + number less than or equal to ``n`` representing the ``k``th cycle + + For example if ``generator_fn`` always returns ``[1,2,3]`` + then ``CyclingIterator(n=2, generator_fn)`` will iterate through + ``[1,2,3,1,2,3]`` + """ + + def __init__(self, n: int, generator_fn, start_epoch=0): + self._n = n + self._epoch = start_epoch + self._generator_fn = generator_fn + self._iter = generator_fn(self._epoch) + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self._iter) + except StopIteration as eod: # eod == end of data + if self._epoch < self._n - 1: + self._epoch += 1 + self._iter = self._generator_fn(self._epoch) + return self.__next__() + else: + raise eod diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/elastic_distributed_sampler.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/elastic_distributed_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..a66803fa8c099a8768d7858f038736d87cb76781 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/elastic_distributed_sampler.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import math + +import torch +from torch.utils.data.distributed import DistributedSampler + + +class ElasticDistributedSampler(DistributedSampler): + """ + Sampler that restricts data loading to a subset of + the dataset for elastic training. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + + .. note:: + Dataset is assumed to be of constant size. + + Args: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. 
+ start_index (optional): Which index of the dataset to start sampling from + """ + + def __init__(self, dataset, num_replicas=None, rank=None, start_index=0): + super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank) + if start_index >= len(dataset): + raise ValueError( + f"Start index {start_index} should be less than dataset size {len(dataset)}" + ) + + self.start_index = start_index + self.num_samples = int( + math.ceil(float(len(self.dataset) - self.start_index) / self.num_replicas) # type: ignore[arg-type] + ) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = ( + torch.randperm(len(self.dataset) - self.start_index, generator=g) # type: ignore[arg-type] + .add(self.start_index) + .tolist() + ) + + # add extra samples to make it evenly divisible + indices += indices[: (self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank : self.total_size : self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/distributed.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b9ac2fac986049809d62ffa8535981cb7ca0a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/distributed.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
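``ElasticDistributedSampler`` can be exercised without initializing a process group as long as ``num_replicas`` and ``rank`` are given explicitly. A minimal sketch; the dataset size, replica count, and start index are illustrative::

    from torch.distributed.elastic.utils.data import ElasticDistributedSampler

    dataset = list(range(12))

    # Two replicas; rank 0 draws its share of the indices at or beyond start_index.
    sampler = ElasticDistributedSampler(dataset, num_replicas=2, rank=0, start_index=8)

    indices = list(sampler)
    assert len(indices) == len(sampler) == 2
    assert all(i >= 8 for i in indices)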
+import datetime +import socket +from contextlib import closing + +import torch.distributed as dist +from torch.distributed.elastic.utils.logging import get_logger + + +log = get_logger(__name__) + +_ADDRESS_IN_USE = "Address already in use" +_SOCKET_TIMEOUT = "Socket Timeout" + +_MEMBER_CHECKIN = "_tcp_store/num_members" +_LAST_MEMBER_CHECKIN = "_tcp_store/last_member" + + +def create_c10d_store( + is_server: bool, + server_addr: str, + server_port: int = -1, + world_size: int = 1, + timeout: float = (60 * 10), # 10 min + wait_for_workers: bool = True, + retries=3, +): + if server_port == -1 and world_size > 1: + raise ValueError( + f"server_port must be specified when world_size > 1, got server_port={server_port}, world_size={world_size}" + ) + + if server_port != -1: + log.info("sever_port: %s, specified, ignoring retries", server_port) + + # only retry when server_port is NOT static + attempt = retries if server_port == -1 else 1 + while True: + if server_port != -1: + port = server_port + else: + port = get_free_port() + + log.info( + "Creating c10d store on %s:%s\n" + " world_size : %s\n" + " is_server : %s\n" + " timeout(sec): %s\n", + server_addr, port, world_size, is_server, timeout + ) + + try: + store = dist.TCPStore( + host_name=server_addr, + port=port, + world_size=world_size, + is_master=is_server, + timeout=datetime.timedelta(seconds=timeout), + wait_for_workers=wait_for_workers, + ) + # skips full rank check when we don't have to wait for all workers + if wait_for_workers: + _check_full_rank(store, world_size) + log.info("Successfully created c10d store") + return store + except RuntimeError as e: + # this is brittle, but the underlying exception type is not properly pybinded + # so we parse the error msg for now, interestingly this is how torch itself + # detects timeouts and port conflicts in their own unittests + # see - caffe2/torch/testing/_internal/common_utils.py + # TODO properly map the exceptions in pybind (c10d/init.cpp) + if str(e) == _ADDRESS_IN_USE: # this will only happen on the server + if attempt < retries: + log.warning( + "port: %s already in use, attempt: [%s/%s]", port, attempt, retries + ) + attempt += 1 + else: + raise RuntimeError( + f"on {server_addr}, port: {port} already in use" + ) from e + else: + raise + + +def _check_full_rank(store, world_size): + idx = store.add(_MEMBER_CHECKIN, 1) + if idx == world_size: + store.set(_LAST_MEMBER_CHECKIN, "") + + try: + store.get(_LAST_MEMBER_CHECKIN) + except RuntimeError as e: + if str(e) == _SOCKET_TIMEOUT: + raise TimeoutError( + f"timed out waiting for all {world_size} members to join" + ) from e + else: + raise + + +def get_free_port(): + sock = get_socket_with_port() + with closing(sock): + return sock.getsockname()[1] + + +def get_socket_with_port() -> socket.socket: + """ + Returns a free port on localhost that is "reserved" by binding a temporary + socket on it. Close the socket before passing the port to the entity + that requires it. 
Usage example + + :: + + sock = _get_socket_with_port() + with closing(sock): + port = sock.getsockname()[1] + sock.close() + # there is still a race-condition that some other process + # may grab this port before func() runs + func(port) + """ + + addrs = socket.getaddrinfo( + host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM + ) + for addr in addrs: + family, type, proto, _, _ = addr + s = socket.socket(family, type, proto) + try: + s.bind(("localhost", 0)) + s.listen(0) + return s + except OSError as e: + s.close() + log.info("Socket creation attempt failed.", exc_info=e) + raise RuntimeError("Failed to create a socket") diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/log_level.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/log_level.py new file mode 100644 index 0000000000000000000000000000000000000000..87ea0f7d64182488b40fd7fed6965ce57ec475a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/log_level.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +def get_log_level() -> str: + """ + Return default log level for pytorch. + """ + return "WARNING" diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/logging.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..e4f1345e4c33993cb6cf020bd612ab28ff86c59a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/logging.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import inspect +import logging +import os +import warnings +from typing import Optional + +from torch.distributed.elastic.utils.log_level import get_log_level + + +def get_logger(name: Optional[str] = None): + """ + Util function to set up a simple logger that writes + into stderr. The loglevel is fetched from the LOGLEVEL + env. variable or WARNING as default. The function will use the + module name of the caller if no name is provided. + + Args: + name: Name of the logger. If no name provided, the name will + be derived from the call stack. + """ + + # Derive the name of the caller, if none provided + # Use depth=2 since this function takes up one level in the call stack + return _setup_logger(name or _derive_module_name(depth=2)) + + +def _setup_logger(name: Optional[str] = None): + log = logging.getLogger(name) + log.setLevel(os.environ.get("LOGLEVEL", get_log_level())) + return log + + +def _derive_module_name(depth: int = 1) -> Optional[str]: + """ + Derives the name of the caller module from the stack frames. + + Args: + depth: The position of the frame in the stack. 
+ """ + try: + stack = inspect.stack() + assert depth < len(stack) + # FrameInfo is just a named tuple: (frame, filename, lineno, function, code_context, index) + frame_info = stack[depth] + + module = inspect.getmodule(frame_info[0]) + if module: + module_name = module.__name__ + else: + # inspect.getmodule(frame_info[0]) does NOT work (returns None) in + # binaries built with @mode/opt + # return the filename (minus the .py extension) as modulename + filename = frame_info[1] + module_name = os.path.splitext(os.path.basename(filename))[0] + return module_name + except Exception as e: + warnings.warn( + f"Error deriving logger module name, using . Exception: {e}", + RuntimeWarning, + ) + return None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/store.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/store.py new file mode 100644 index 0000000000000000000000000000000000000000..9c7abab9291c617b2851d8cea4a32cfd19413ed4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/utils/store.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from datetime import timedelta +from typing import List + + +def get_all(store, rank: int, prefix: str, size: int): + r""" + Given a store and a prefix, the method goes through the array of keys + of the following format: ``{prefix}{idx}``, where idx is in a range + from 0 to size, and tries to retrieve the data. + + The Rank0 process waits at the end to make sure all other processes + finished the procedure before exiting. + + Usage + + :: + + values = get_all(store, 'torchelastic/data', 3) + value1 = values[0] # retrieves the data for key torchelastic/data0 + value2 = values[1] # retrieves the data for key torchelastic/data1 + value3 = values[2] # retrieves the data for key torchelastic/data2 + + """ + data_arr = [] + for idx in range(size): + data = store.get(f"{prefix}{idx}") + data_arr.append(data) + store.set(f"{prefix}{rank}.FIN", b"FIN") + if rank == 0: + # Rank0 runs the TCPStore daemon, as a result it needs to exit last. + # Otherwise, the barrier may timeout if rank0 process finished the work + # before other processes finished `get_all` method + for node_rank in range(size): + store.get(f"{prefix}{node_rank}.FIN") + + return data_arr + + +def synchronize( + store, + data: bytes, + rank: int, + world_size: int, + key_prefix: str, + barrier_timeout: float = 300, +) -> List[bytes]: + """ + Synchronizes ``world_size`` agents between each other using the underlying c10d store. + The ``data`` will be available on each of the agents. + + Note: The data on the path is not deleted, as a result there can be stale data if + you use the same key_prefix twice. + """ + store.set_timeout(timedelta(seconds=barrier_timeout)) + store.set(f"{key_prefix}{rank}", data) + agent_data = get_all(store, rank, key_prefix, world_size) + return agent_data + + +def barrier( + store, rank: int, world_size: int, key_prefix: str, barrier_timeout: float = 300 +) -> None: + """ + A global lock between agents. + + Note: Since the data is not removed from the store, the barrier can be used + once per unique ``key_prefix``. + """ + data = f"{rank}".encode() + synchronize(store, data, rank, world_size, key_prefix, barrier_timeout)