Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- ckpts/universal/global_step120/zero/20.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/26.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/6.attention.dense.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py +134 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py +112 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py +22 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py +163 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py +201 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py +235 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py +873 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py +375 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py +159 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py +16 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py +102 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py +15 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py +32 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py +78 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py +153 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py +150 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/20.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a1c89e3ff5511b8fa237a7bba96056bab5abff289d3c9d0f1c229fd0d3419d0
+size 9372
ckpts/universal/global_step120/zero/26.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b819bb73fe44f2e29901c6f9c69ae883dcabf3ea541886f97df942ac35fef34
+size 16778396
ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7eaab1a7e808df17a1d34eefeac1ee7b7eb59defa7f345ebd3f3c07b25e4b942
+size 16778396
ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e9031785271cec36db29f863f61e9029a4f14e4d7537c671120d928f317f383
+size 16778411
ckpts/universal/global_step120/zero/6.attention.dense.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58ef333cc07c1471ee20910b212dadb8abefc865e1669d89095e2e27e4012369
+size 16778317
ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd79f294691648c9aa9fe0907d4dbb6b941e0c291b598b7c3934672d1c75b917
+size 33555612
ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:625299ec80a1d7781e1e3686abe78807fd2694fded23b0a2099a286777987d8b
+size 33555533
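
The seven checkpoint tensors above are stored through Git LFS, so the commit only adds three-line pointer files (spec version, content hash, byte size); the tensor data itself lives in LFS storage. Once the repository is fetched with LFS enabled, each shard is an ordinary file saved by PyTorch. A minimal, hypothetical inspection sketch (this is an illustration, not part of the commit; the exact object stored in each .pt file is an assumption):

import torch

# Load one optimizer-state shard (Adam first-moment estimate) onto CPU.
exp_avg = torch.load(
    "ckpts/universal/global_step120/zero/6.attention.dense.weight/exp_avg.pt",
    map_location="cpu",
)
print(type(exp_avg), getattr(exp_avg, "shape", None))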
venv/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.62 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.42 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc
ADDED
Binary file (31.8 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc
ADDED
Binary file (11.1 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py
ADDED
@@ -0,0 +1,134 @@
+#!/usr/bin/env/python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Module contains events processing mechanisms that are integrated with the standard python logging.
+
+Example of usage:
+
+::
+
+  from torch.distributed.elastic import events
+  event = events.Event(name="test_event", source=events.EventSource.WORKER, metadata={...})
+  events.get_logging_handler(destination="console").info(event)
+
+"""
+
+import inspect
+import logging
+import os
+import socket
+import traceback
+from enum import Enum
+from typing import Dict, Optional
+
+from torch.distributed.elastic.events.handlers import get_logging_handler
+
+from .api import (  # noqa: F401
+    Event,
+    EventMetadataValue,
+    EventSource,
+    NodeState,
+    RdzvEvent,
+)
+
+_events_loggers: Dict[str, logging.Logger] = {}
+
+def _get_or_create_logger(destination: str = "null") -> logging.Logger:
+    """
+    Construct python logger based on the destination type or extends if provided.
+
+    Available destination could be found in ``handlers.py`` file.
+    The constructed logger does not propagate messages to the upper level loggers,
+    e.g. root logger. This makes sure that a single event can be processed once.
+
+    Args:
+        destination: The string representation of the event handler.
+            Available handlers found in ``handlers`` module
+    """
+    global _events_loggers
+
+    if destination not in _events_loggers:
+        _events_logger = logging.getLogger(f"torchelastic-events-{destination}")
+        _events_logger.setLevel(os.environ.get("LOGLEVEL", "INFO"))
+        # Do not propagate message to the root logger
+        _events_logger.propagate = False
+
+        logging_handler = get_logging_handler(destination)
+        _events_logger.addHandler(logging_handler)
+
+        # Add the logger to the global dictionary
+        _events_loggers[destination] = _events_logger
+
+    return _events_loggers[destination]
+
+
+def record(event: Event, destination: str = "null") -> None:
+    _get_or_create_logger(destination).info(event.serialize())
+
+def record_rdzv_event(event: RdzvEvent) -> None:
+    _get_or_create_logger("dynamic_rendezvous").info(event.serialize())
+
+
+def construct_and_record_rdzv_event(
+    run_id: str,
+    message: str,
+    node_state: NodeState,
+    name: str = "",
+    hostname: str = "",
+    pid: Optional[int] = None,
+    master_endpoint: str = "",
+    local_id: Optional[int] = None,
+    rank: Optional[int] = None,
+) -> None:
+    # We don't want to perform an extra computation if not needed.
+    if isinstance(get_logging_handler("dynamic_rendezvous"), logging.NullHandler):
+        return
+
+    # Set up parameters.
+    if not hostname:
+        hostname = socket.getfqdn()
+    if not pid:
+        pid = os.getpid()
+
+    # Determines which file called this function.
+    callstack = inspect.stack()
+    filename = "no_file"
+    if len(callstack) > 1:
+        stack_depth_1 = callstack[1]
+        filename = os.path.basename(stack_depth_1.filename)
+        if not name:
+            name = stack_depth_1.function
+
+    # Delete the callstack variable. If kept, this can mess with python's
+    # garbage collector as we are holding on to stack frame information in
+    # the inspect module.
+    del callstack
+
+    # Set up error trace if this is an exception
+    if node_state == NodeState.FAILED:
+        error_trace = traceback.format_exc()
+    else:
+        error_trace = ""
+
+    # Initialize event object
+    event = RdzvEvent(
+        name=f"{filename}:{name}",
+        run_id=run_id,
+        message=message,
+        hostname=hostname,
+        pid=pid,
+        node_state=node_state,
+        master_endpoint=master_endpoint,
+        rank=rank,
+        local_id=local_id,
+        error_trace=error_trace,
+    )
+
+    # Finally, record the event.
+    record_rdzv_event(event)
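
A minimal usage sketch for the module above (assuming a stock PyTorch install); the "console" destination resolves to the StreamHandler registered in handlers.py, so the serialized event is emitted through the logging framework:

from torch.distributed.elastic.events import Event, EventSource, record

# metadata values may be str, int, float, bool, or None (EventMetadataValue)
evt = Event(name="checkpoint_saved", source=EventSource.WORKER, metadata={"step": 120})
record(evt, destination="console")  # logs the JSON-serialized event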
venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.12 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc
ADDED
Binary file (3.81 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc
ADDED
Binary file (591 Bytes)
venv/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py
ADDED
@@ -0,0 +1,112 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import json
+from dataclasses import asdict, dataclass, field
+from enum import Enum
+from typing import Dict, Union, Optional
+
+__all__ = ['EventSource', 'Event', 'NodeState', 'RdzvEvent']
+
+EventMetadataValue = Union[str, int, float, bool, None]
+
+
+class EventSource(str, Enum):
+    """Known identifiers of the event producers."""
+
+    AGENT = "AGENT"
+    WORKER = "WORKER"
+
+
+@dataclass
+class Event:
+    """
+    The class represents the generic event that occurs during the torchelastic job execution.
+
+    The event can be any kind of meaningful action.
+
+    Args:
+        name: event name.
+        source: the event producer, e.g. agent or worker
+        timestamp: timestamp in milliseconds when event occurred.
+        metadata: additional data that is associated with the event.
+    """
+
+    name: str
+    source: EventSource
+    timestamp: int = 0
+    metadata: Dict[str, EventMetadataValue] = field(default_factory=dict)
+
+    def __str__(self):
+        return self.serialize()
+
+    @staticmethod
+    def deserialize(data: Union[str, "Event"]) -> "Event":
+        if isinstance(data, Event):
+            return data
+        if isinstance(data, str):
+            data_dict = json.loads(data)
+        data_dict["source"] = EventSource[data_dict["source"]]  # type: ignore[possibly-undefined]
+        return Event(**data_dict)
+
+    def serialize(self) -> str:
+        return json.dumps(asdict(self))
+
+
+class NodeState(str, Enum):
+    """The states that a node can be in rendezvous."""
+
+    INIT = "INIT"
+    RUNNING = "RUNNING"
+    SUCCEEDED = "SUCCEEDED"
+    FAILED = "FAILED"
+
+
+@dataclass
+class RdzvEvent:
+    """
+    Dataclass to represent any rendezvous event.
+
+    Args:
+        name: Event name. (E.g. Current action being performed)
+        run_id: The run id of the rendezvous
+        message: The message describing the event
+        hostname: Hostname of the node
+        pid: The process id of the node
+        node_state: The state of the node (INIT, RUNNING, SUCCEEDED, FAILED)
+        master_endpoint: The master endpoint for the rendezvous store, if known
+        rank: The rank of the node, if known
+        local_id: The local_id of the node, if defined in dynamic_rendezvous.py
+        error_trace: Error stack trace, if this is an error event.
+    """
+
+    name: str
+    run_id: str
+    message: str
+    hostname: str
+    pid: int
+    node_state: NodeState
+    master_endpoint: str = ""
+    rank: Optional[int] = None
+    local_id: Optional[int] = None
+    error_trace: str = ""
+
+    def __str__(self):
+        return self.serialize()
+
+    @staticmethod
+    def deserialize(data: Union[str, "RdzvEvent"]) -> "RdzvEvent":
+        if isinstance(data, RdzvEvent):
+            return data
+        if isinstance(data, str):
+            data_dict = json.loads(data)
+        data_dict["node_state"] = NodeState[data_dict["node_state"]]  # type: ignore[possibly-undefined]
+        return RdzvEvent(**data_dict)
+
+    def serialize(self) -> str:
+        return json.dumps(asdict(self))
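
Because Event and RdzvEvent serialize to JSON via dataclasses.asdict, a serialize/deserialize round trip preserves the payload; a small sketch using only the API defined above (the event name and metadata are illustrative):

from torch.distributed.elastic.events.api import Event, EventSource

original = Event(name="save", source=EventSource.AGENT, metadata={"ok": True})
restored = Event.deserialize(original.serialize())  # JSON string -> Event
assert restored == original  # dataclass equality compares all fields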
venv/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py
ADDED
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+from typing import Dict
+
+
+_log_handlers: Dict[str, logging.Handler] = {
+    "console": logging.StreamHandler(),
+    "dynamic_rendezvous": logging.NullHandler(),
+    "null": logging.NullHandler(),
+}
+
+
+def get_logging_handler(destination: str = "null") -> logging.Handler:
+    global _log_handlers
+    return _log_handlers[destination]
venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py
ADDED
@@ -0,0 +1,163 @@
+#!/usr/bin/env/python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Metrics API.
+
+**Overview**:
+
+The metrics API in torchelastic is used to publish telemetry metrics.
+It is designed to be used by torchelastic's internal modules to
+publish metrics for the end user with the goal of increasing visibility
+and helping with debugging. However you may use the same API in your
+jobs to publish metrics to the same metrics ``sink``.
+
+A ``metric`` can be thought of as timeseries data
+and is uniquely identified by the string-valued tuple
+``(metric_group, metric_name)``.
+
+torchelastic makes no assumptions about what a ``metric_group`` is
+and what relationship it has with ``metric_name``. It is totally up
+to the user to use these two fields to uniquely identify a metric.
+
+.. note:: The metric group ``torchelastic`` is reserved by torchelastic for
+   platform level metrics that it produces.
+   For instance torchelastic may output the latency (in milliseconds)
+   of a re-rendezvous operation from the agent as
+   ``(torchelastic, agent.rendezvous.duration.ms)``
+
+A sensible way to use metric groups is to map them to a stage or module
+in your job. You may also encode certain high level properties
+the job such as the region or stage (dev vs prod).
+
+**Publish Metrics**:
+
+Using torchelastic's metrics API is similar to using python's logging
+framework. You first have to configure a metrics handler before
+trying to add metric data.
+
+The example below measures the latency for the ``calculate()`` function.
+
+::
+
+  import time
+  import torch.distributed.elastic.metrics as metrics
+
+  # makes all metrics other than the one from "my_module" to go /dev/null
+  metrics.configure(metrics.NullMetricsHandler())
+  metrics.configure(metrics.ConsoleMetricsHandler(), "my_module")
+
+  def my_method():
+    start = time.time()
+    calculate()
+    end = time.time()
+    metrics.put_metric("calculate_latency", int(end-start), "my_module")
+
+You may also use the torch.distributed.elastic.metrics.prof` decorator
+to conveniently and succinctly profile functions
+
+::
+
+  # -- in module examples.foobar --
+
+  import torch.distributed.elastic.metrics as metrics
+
+  metrics.configure(metrics.ConsoleMetricsHandler(), "foobar")
+  metrics.configure(metrics.ConsoleMetricsHandler(), "Bar")
+
+  @metrics.prof
+  def foo():
+    pass
+
+  class Bar():
+
+    @metrics.prof
+    def baz():
+        pass
+
+``@metrics.prof`` will publish the following metrics
+::
+
+  <leaf_module or classname>.success - 1 if the function finished successfully
+  <leaf_module or classname>.failure - 1 if the function threw an exception
+  <leaf_module or classname>.duration.ms - function duration in milliseconds
+
+**Configuring Metrics Handler**:
+
+`torch.distributed.elastic.metrics.MetricHandler` is responsible for emitting
+the added metric values to a particular destination. Metric groups can be
+configured with different metric handlers.
+
+By default torchelastic emits all metrics to ``/dev/null``.
+By adding the following configuration metrics,
+``torchelastic`` and ``my_app`` metric groups will be printed out to
+console.
+
+::
+
+  import torch.distributed.elastic.metrics as metrics
+
+  metrics.configure(metrics.ConsoleMetricHandler(), group = "torchelastic")
+  metrics.configure(metrics.ConsoleMetricHandler(), group = "my_app")
+
+**Writing a Custom Metric Handler**:
+
+If you want your metrics to be emitted to a custom location, implement
+the `torch.distributed.elastic.metrics.MetricHandler` interface
+and configure your job to use your custom metric handler.
+
+Below is a toy example that prints the metrics to ``stdout``
+
+::
+
+  import torch.distributed.elastic.metrics as metrics
+
+  class StdoutMetricHandler(metrics.MetricHandler):
+      def emit(self, metric_data):
+          ts = metric_data.timestamp
+          group = metric_data.group_name
+          name = metric_data.name
+          value = metric_data.value
+          print(f"[{ts}][{group}]: {name}={value}")
+
+  metrics.configure(StdoutMetricHandler(), group="my_app")
+
+Now all metrics in the group ``my_app`` will be printed to stdout as:
+
+::
+
+  [1574213883.4182858][my_app]: my_metric=<value>
+  [1574213940.5237644][my_app]: my_metric=<value>
+
+"""
+
+from typing import Optional
+
+from .api import (  # noqa: F401
+    ConsoleMetricHandler,
+    MetricData,
+    MetricHandler,
+    MetricsConfig,
+    NullMetricHandler,
+    configure,
+    get_elapsed_time_ms,
+    getStream,
+    prof,
+    profile,
+    publish_metric,
+    put_metric,
+)
+
+
+def initialize_metrics(cfg: Optional[MetricsConfig] = None):
+    pass
+
+
+try:
+    from torch.distributed.elastic.metrics.static_init import *  # type: ignore[import] # noqa: F401 F403
+except ModuleNotFoundError:
+    pass
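
The docstring above already sketches the intended configuration flow; for completeness, a compact runnable sketch that wires the console handler to a custom group and emits one data point (the group and metric names are arbitrary examples):

import torch.distributed.elastic.metrics as metrics

metrics.configure(metrics.ConsoleMetricHandler(), group="my_app")
metrics.put_metric("step_latency_ms", 42, "my_app")  # printed by the console handler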
venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.92 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc
ADDED
Binary file (5.98 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py
ADDED
@@ -0,0 +1,201 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import abc
+import time
+import warnings
+from collections import namedtuple
+from functools import wraps
+from typing import Dict, Optional
+
+__all__ = ['MetricsConfig', 'MetricHandler', 'ConsoleMetricHandler', 'NullMetricHandler', 'MetricStream',
+           'configure', 'getStream', 'prof', 'profile', 'put_metric', 'publish_metric', 'get_elapsed_time_ms',
+           'MetricData']
+
+MetricData = namedtuple("MetricData", ["timestamp", "group_name", "name", "value"])
+
+
+class MetricsConfig:
+    __slots__ = ["params"]
+
+    def __init__(self, params: Optional[Dict[str, str]] = None):
+        self.params = params
+        if self.params is None:
+            self.params = {}
+
+
+class MetricHandler(abc.ABC):
+    @abc.abstractmethod
+    def emit(self, metric_data: MetricData):
+        pass
+
+
+class ConsoleMetricHandler(MetricHandler):
+    def emit(self, metric_data: MetricData):
+        print(
+            f"[{metric_data.timestamp}][{metric_data.group_name}]: {metric_data.name}={metric_data.value}"
+        )
+
+
+class NullMetricHandler(MetricHandler):
+    def emit(self, metric_data: MetricData):
+        pass
+
+
+class MetricStream:
+    def __init__(self, group_name: str, handler: MetricHandler):
+        self.group_name = group_name
+        self.handler = handler
+
+    def add_value(self, metric_name: str, metric_value: int):
+        self.handler.emit(
+            MetricData(time.time(), self.group_name, metric_name, metric_value)
+        )
+
+
+_metrics_map: Dict[str, MetricHandler] = {}
+_default_metrics_handler: MetricHandler = NullMetricHandler()
+
+
+# pyre-fixme[9]: group has type `str`; used as `None`.
+def configure(handler: MetricHandler, group: Optional[str] = None):
+    if group is None:
+        global _default_metrics_handler
+        # pyre-fixme[9]: _default_metrics_handler has type `NullMetricHandler`; used
+        # as `MetricHandler`.
+        _default_metrics_handler = handler
+    else:
+        _metrics_map[group] = handler
+
+
+def getStream(group: str):
+    if group in _metrics_map:
+        handler = _metrics_map[group]
+    else:
+        handler = _default_metrics_handler
+    return MetricStream(group, handler)
+
+
+def _get_metric_name(fn):
+    qualname = fn.__qualname__
+    split = qualname.split(".")
+    if len(split) == 1:
+        module = fn.__module__
+        if module:
+            return module.split(".")[-1] + "." + split[0]
+        else:
+            return split[0]
+    else:
+        return qualname
+
+
+def prof(fn=None, group: str = "torchelastic"):
+    r"""
+    @profile decorator publishes duration.ms, count, success, failure metrics for the function that it decorates.
+
+    The metric name defaults to the qualified name (``class_name.def_name``) of the function.
+    If the function does not belong to a class, it uses the leaf module name instead.
+
+    Usage
+
+    ::
+
+        @metrics.prof
+        def x():
+            pass
+
+        @metrics.prof(group="agent")
+        def y():
+            pass
+    """
+
+    def wrap(f):
+        @wraps(f)
+        def wrapper(*args, **kwargs):
+            key = _get_metric_name(f)
+            try:
+                start = time.time()
+                result = f(*args, **kwargs)
+                put_metric(f"{key}.success", 1, group)
+            except Exception:
+                put_metric(f"{key}.failure", 1, group)
+                raise
+            finally:
+                put_metric(f"{key}.duration.ms", get_elapsed_time_ms(start), group)  # type: ignore[possibly-undefined]
+            return result
+
+        return wrapper
+
+    if fn:
+        return wrap(fn)
+    else:
+        return wrap
+
+
+def profile(group=None):
+    """
+    @profile decorator adds latency and success/failure metrics to any given function.
+
+    Usage
+
+    ::
+
+        @metrics.profile("my_metric_group")
+        def some_function(<arguments>):
+    """
+    warnings.warn("Deprecated, use @prof instead", DeprecationWarning)
+
+    def wrap(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            try:
+                start_time = time.time()
+                result = func(*args, **kwargs)
+                publish_metric(group, f"{func.__name__}.success", 1)
+            except Exception:
+                publish_metric(group, f"{func.__name__}.failure", 1)
+                raise
+            finally:
+                publish_metric(
+                    group,
+                    f"{func.__name__}.duration.ms",
+                    get_elapsed_time_ms(start_time),  # type: ignore[possibly-undefined]
+                )
+            return result
+
+        return wrapper
+
+    return wrap
+
+
+def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchelastic"):
+    """
+    Publish a metric data point.
+
+    Usage
+
+    ::
+
+        put_metric("metric_name", 1)
+        put_metric("metric_name", 1, "metric_group_name")
+    """
+    getStream(metric_group).add_value(metric_name, metric_value)
+
+
+def publish_metric(metric_group: str, metric_name: str, metric_value: int):
+    warnings.warn(
+        "Deprecated, use put_metric(metric_group)(metric_name, metric_value) instead"
+    )
+    metric_stream = getStream(metric_group)
+    metric_stream.add_value(metric_name, metric_value)
+
+
+def get_elapsed_time_ms(start_time_in_seconds: float):
+    """Return the elapsed time in millis from the given start time."""
+    end_time = time.time()
+    return int((end_time - start_time_in_seconds) * 1000)
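
Since MetricHandler is a small abstract class with a single emit method, a handler that captures data points in memory (handy in tests) is straightforward; a hedged sketch using only the names defined above, with CapturingMetricHandler being an illustrative class of my own:

from torch.distributed.elastic.metrics.api import MetricData, MetricHandler, configure, put_metric

class CapturingMetricHandler(MetricHandler):
    """Collects emitted MetricData tuples instead of printing them."""

    def __init__(self) -> None:
        self.records = []

    def emit(self, metric_data: MetricData) -> None:
        self.records.append(metric_data)

handler = CapturingMetricHandler()
configure(handler, group="unit_test")
put_metric("iterations", 3, "unit_test")
assert handler.records[0].name == "iterations"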
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py
ADDED
@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Library that launches and manages ``n`` copies of worker subprocesses either specified by a function or a binary.
+
+For functions, it uses ``torch.multiprocessing`` (and therefore python
+``multiprocessing``) to spawn/fork worker processes. For binaries it uses python
+``subprocessing.Popen`` to create worker processes.
+
+
+Usage 1: Launching two trainers as a function
+
+::
+
+ from torch.distributed.elastic.multiprocessing import Std, start_processes
+
+ def trainer(a, b, c):
+     pass # train
+
+
+ # runs two trainers
+ # LOCAL_RANK=0 trainer(1,2,3)
+ # LOCAL_RANK=1 trainer(4,5,6)
+ ctx = start_processes(
+         name="trainer",
+         entrypoint=trainer,
+         args={0: (1,2,3), 1: (4,5,6)},
+         envs={0: {"LOCAL_RANK": 0}, 1: {"LOCAL_RANK": 1}},
+         log_dir="/tmp/foobar",
+         redirects=Std.ALL, # write all worker stdout/stderr to a log file
+         tee={0: Std.ERR}, # tee only local rank 0's stderr to console
+       )
+
+ # waits for all copies of trainer to finish
+ ctx.wait()
+
+Usage 2: Launching 2 echo workers as a binary
+
+::
+
+ # same as invoking
+ # echo hello
+ # echo world > stdout.log
+ ctx = start_processes(
+         name="echo"
+         entrypoint="echo",
+         log_dir="/tmp/foobar",
+         args={0: "hello", 1: "world"},
+         redirects={1: Std.OUT},
+        )
+
+Just like ``torch.multiprocessing``, the return value of the function
+:func:`start_processes` is a process context (:class:`api.PContext`). If a function
+was launched, a :class:`api.MultiprocessContext` is returned and if a binary
+was launched a :class:`api.SubprocessContext` is returned. Both are specific
+implementations of the parent :class:`api.PContext` class.
+"""
+
+import os
+from typing import Callable, Dict, Optional, Tuple, Union, Set
+
+from torch.distributed.elastic.multiprocessing.api import (  # noqa: F401
+    _validate_full_rank,
+    DefaultLogsSpecs,
+    LogsDest,
+    LogsSpecs,
+    MultiprocessContext,
+    PContext,
+    ProcessFailure,
+    RunProcsResult,
+    SignalException,
+    Std,
+    SubprocessContext,
+    to_map,
+)
+from torch.distributed.elastic.utils.logging import get_logger
+
+__all__ = [
+    "start_processes",
+    "MultiprocessContext",
+    "PContext",
+    "ProcessFailure",
+    "RunProcsResult",
+    "SignalException",
+    "Std",
+    "LogsDest",
+    "LogsSpecs",
+    "DefaultLogsSpecs",
+    "SubprocessContext",
+    "to_map",
+]
+
+log = get_logger(__name__)
+
+
+def start_processes(
+    name: str,
+    entrypoint: Union[Callable, str],
+    args: Dict[int, Tuple],
+    envs: Dict[int, Dict[str, str]],
+    logs_specs: LogsSpecs,
+    log_line_prefixes: Optional[Dict[int, str]] = None,
+    start_method: str = "spawn",
+) -> PContext:
+    """
+    Start ``n`` copies of ``entrypoint`` processes with the provided options.
+
+    ``entrypoint`` is either a ``Callable`` (function) or a ``str`` (binary).
+    The number of copies is determined by the number of entries for ``args`` and
+    ``envs`` arguments, which need to have the same key set.
+
+    ``args`` and ``env`` parameters are the arguments and environment variables
+    to pass down to the entrypoint mapped by the replica index (local rank).
+    All local ranks must be accounted for.
+    That is, the keyset should be ``{0,1,...,(nprocs-1)}``.
+
+    .. note:: When the ``entrypoint`` is a binary (``str``), ``args`` can only be strings.
+        If any other type is given, then it is casted to a string representation
+        (e.g. ``str(arg1)``). Furthermore, a binary failure will only write
+        an ``error.json`` error file if the main function is annotated with
+        ``torch.distributed.elastic.multiprocessing.errors.record``. For function launches,
+        this is done by default and there is no need to manually annotate
+        with the ``@record`` annotation.
+
+    ``redirects`` and ``tee`` are bitmasks specifying which std stream(s) to redirect
+    to a log file in the ``log_dir``. Valid mask values are defined in ``Std``.
+    To redirect/tee only certain local ranks, pass ``redirects`` as a map with the key as
+    the local rank to specify the redirect behavior for.
+    Any missing local ranks will default to ``Std.NONE``.
+
+    ``tee`` acts like the unix "tee" command in that it redirects + prints to console.
+    To avoid worker stdout/stderr from printing to console, use the ``redirects`` parameter.
+
+    For each process, the ``log_dir`` will contain:
+
+    #. ``{local_rank}/error.json``: if the process failed, a file with the error info
+    #. ``{local_rank}/stdout.json``: if ``redirect & STDOUT == STDOUT``
+    #. ``{local_rank}/stderr.json``: if ``redirect & STDERR == STDERR``
+
+    .. note:: It is expected that the ``log_dir`` exists, is empty, and is a directory.
+
+    Example:
+    ::
+
+        log_dir = "/tmp/test"
+
+        # ok; two copies of foo: foo("bar0"), foo("bar1")
+        start_processes(
+           name="trainer",
+           entrypoint=foo,
+           args:{0:("bar0",), 1:("bar1",),
+           envs:{0:{}, 1:{}},
+           log_dir=log_dir
+        )
+
+        # invalid; envs missing for local rank 1
+        start_processes(
+           name="trainer",
+           entrypoint=foo,
+           args:{0:("bar0",), 1:("bar1",),
+           envs:{0:{}},
+           log_dir=log_dir
+        )
+
+        # ok; two copies of /usr/bin/touch: touch file1, touch file2
+        start_processes(
+           name="trainer",
+           entrypoint="/usr/bin/touch",
+           args:{0:("file1",), 1:("file2",),
+           envs:{0:{}, 1:{}},
+           log_dir=log_dir
+         )
+
+        # caution; arguments casted to string, runs:
+        # echo "1" "2" "3" and echo "[1, 2, 3]"
+        start_processes(
+           name="trainer",
+           entrypoint="/usr/bin/echo",
+           args:{0:(1,2,3), 1:([1,2,3],),
+           envs:{0:{}, 1:{}},
+           log_dir=log_dir
+        )
+
+    Args:
+        name: a human readable short name that describes what the processes are
+              (used as header when tee'ing stdout/stderr outputs)
+        entrypoint: either a ``Callable`` (function) or ``cmd`` (binary)
+        args: arguments to each replica
+        envs: env vars to each replica
+        log_dir: directory used to write log files
+        start_method: multiprocessing start method (spawn, fork, forkserver)
+                      ignored for binaries
+        redirects: which std streams to redirect to a log file
+        tee: which std streams to redirect + print to console
+        local_ranks_filter: which ranks' logs to print to console
+
+    """
+
+    nprocs = len(args)
+    _validate_full_rank(args, nprocs, "args")
+    _validate_full_rank(envs, nprocs, "envs")
+
+    context: PContext
+    if isinstance(entrypoint, str):
+        context = SubprocessContext(
+            name=name,
+            entrypoint=entrypoint,
+            args=args,
+            envs=envs,
+            logs_specs=logs_specs,
+            log_line_prefixes=log_line_prefixes,
+        )
+    else:
+        context = MultiprocessContext(
+            name=name,
+            entrypoint=entrypoint,
+            args=args,
+            envs=envs,
+            log_line_prefixes=log_line_prefixes,
+            start_method=start_method,
+            logs_specs=logs_specs,
+        )
+
+    try:
+        context.start()
+        return context
+    except Exception:
+        context.close()
+        raise
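
Note that in this version start_processes takes a logs_specs object rather than the log_dir/redirects/tee arguments still listed in the docstring's Args section. A minimal sketch of launching a binary entrypoint with DefaultLogsSpecs (paths are placeholders, and ctx.wait() is used as in the module docstring above):

from torch.distributed.elastic.multiprocessing import DefaultLogsSpecs, Std, start_processes

ctx = start_processes(
    name="echo",
    entrypoint="/bin/echo",
    args={0: ("hello",), 1: ("world",)},
    envs={0: {}, 1: {}},
    logs_specs=DefaultLogsSpecs(log_dir="/tmp/echo_logs", redirects=Std.ALL, tee={0: Std.OUT}),
)
result = ctx.wait()  # blocks until both copies exit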
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (6.99 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc
ADDED
Binary file (25 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc
ADDED
Binary file (2.88 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc
ADDED
Binary file (4.42 kB)
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py
ADDED
@@ -0,0 +1,873 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import abc
+import logging
+import os
+import re
+import shutil
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+from contextlib import nullcontext
+from dataclasses import dataclass, field
+from enum import IntFlag
+from multiprocessing import synchronize
+from types import FrameType
+from typing import Any, Callable, Dict, Optional, Set, Tuple, Union
+from abc import ABC, abstractmethod
+
+import torch.multiprocessing as mp
+from torch.distributed.elastic.multiprocessing.errors import ProcessFailure, record
+from torch.distributed.elastic.multiprocessing.redirects import (
+    redirect_stderr,
+    redirect_stdout,
+)
+
+from torch.distributed.elastic.multiprocessing.subprocess_handler import SubprocessHandler, get_subprocess_handler
+from torch.distributed.elastic.multiprocessing.tail_log import TailLog
+
+IS_WINDOWS = sys.platform == "win32"
+IS_MACOS = sys.platform == "darwin"
+
+
+log = logging.getLogger(__name__)
+
+__all__ = [
+    "DefaultLogsSpecs",
+    "SignalException",
+    "Std",
+    "to_map",
+    "RunProcsResult",
+    "PContext",
+    "get_std_cm",
+    "MultiprocessContext",
+    "SubprocessContext",
+]
+
+class SignalException(Exception):
+    """
+    Exception is raised inside the torchelastic agent process by the termination handler
+    if the death signal got received by the process.
+    """
+
+    def __init__(self, msg: str, sigval: signal.Signals) -> None:
+        super().__init__(msg)
+        self.sigval = sigval
+
+
+def _terminate_process_handler(signum: int, frame: Optional[FrameType]) -> None:
+    """Termination handler that raises exceptions on the main process.
+
+    When the process receives death signal(SIGTERM, SIGINT), this termination handler will
+    be invoked. It raises the ``SignalException`` exception that should be processed by the
+    user code. Python does not terminate process after the termination handler is finished,
+    so the exception should not be silently ignored, otherwise the process will never
+    be terminated.
+    """
+    sigval = signal.Signals(signum)
+    raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
+
+
+def _get_kill_signal() -> signal.Signals:
+    """Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows."""
+    if IS_WINDOWS:
+        return signal.CTRL_C_EVENT  # type: ignore[attr-defined] # noqa: F821
+    else:
+        return signal.SIGKILL
+
+
+def _get_default_signal() -> signal.Signals:
+    """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows."""
+    if IS_WINDOWS:
+        return signal.CTRL_C_EVENT  # type: ignore[attr-defined] # noqa: F821
+    else:
+        return signal.SIGTERM
+
+
+def _validate_full_rank(d: Dict[int, Any], nprocs: int, what: str):
+    actual_keys = set(d.keys())
+    expected_keys = set(range(nprocs))
+
+    if actual_keys != expected_keys:
+        raise RuntimeError(
+            f"{what}, local rank mapping mismatch,"
+            f" expected: {expected_keys}, actual: {actual_keys}"
+        )
+
+
+_MAPPING_REGEX = r"^(\d:[0123],)*(\d:[0123])$"
+_VALUE_REGEX = r"^[0123]$"
+
+
+class Std(IntFlag):
+    NONE = 0
+    OUT = 1
+    ERR = 2
+    ALL = OUT | ERR
+
+    @classmethod
+    def from_str(cls, vm: str) -> Union["Std", Dict[int, "Std"]]:
+        """
+        Example:
+        ::
+
+         from_str("0") -> Std.NONE
+         from_str("1") -> Std.OUT
+         from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR}
+
+        Any other input raises an exception
+        """
+
+        def to_std(v: str) -> Std:  # type: ignore[return]
+            s = Std(int(v))
+            if s in Std:
+                return s
+            # return None -> should NEVER reach here since we regex check input
+
+        if re.match(_VALUE_REGEX, vm):  # vm is a number (e.g. 0)
+            return to_std(vm)
+        elif re.match(_MAPPING_REGEX, vm):  # vm is a mapping (e.g. 0:1,1:2)
+            d: Dict[int, Std] = {}
+            for m in vm.split(","):
+                i, v = m.split(":")
+                d[int(i)] = to_std(v)
+            return d
+        else:
+            raise ValueError(
+                f"{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>"
+            )
+
+
+def to_map(
+    val_or_map: Union[Std, Dict[int, Std]], local_world_size: int
+) -> Dict[int, Std]:
+    """
+    Certain APIs take redirect settings either as a single value (e.g. apply to all
+    local ranks) or as an explicit user-provided mapping. This method is a convenience
+    method that converts a value or mapping into a mapping.
+
+    Example:
+    ::
+
+     to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT}
+     to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT}
+     to_map({0: Std.OUT, 1: Std.OUT}, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT}
+    """
+    if isinstance(val_or_map, Std):
+        return dict.fromkeys(range(local_world_size), val_or_map)
+    else:
+        map = {}
+        for i in range(local_world_size):
+            map[i] = val_or_map.get(i, Std.NONE)
+        return map
+
+
+@dataclass
+class LogsDest:
+    """
+    For each log type, holds mapping of local rank ids to file paths.
+    """
+    stdouts: Dict[int, str] = field(default_factory=dict)
+    stderrs: Dict[int, str] = field(default_factory=dict)
+    tee_stdouts: Dict[int, str] = field(default_factory=dict)
+    tee_stderrs: Dict[int, str] = field(default_factory=dict)
+    error_files: Dict[int, str] = field(default_factory=dict)
+
+
+class LogsSpecs(ABC):
+    """
+    Defines logs processing and redirection for each worker process.
+
+    Args:
+        log_dir:
+            Base directory where logs will be written.
+        redirects:
+            Streams to redirect to files. Pass a single ``Std``
+            enum to redirect for all workers, or a mapping keyed
+            by local_rank to selectively redirect.
+        tee:
+            Streams to duplicate to stdout/stderr.
+            Pass a single ``Std`` enum to duplicate streams for all workers,
+            or a mapping keyed by local_rank to selectively duplicate.
+    """
+
+    def __init__(
+        self,
+        log_dir: Optional[str] = None,
+        redirects: Union[Std, Dict[int, Std]] = Std.NONE,
+        tee: Union[Std, Dict[int, Std]] = Std.NONE,
+        local_ranks_filter: Optional[Set[int]] = None,
+    ) -> None:
+        self._root_log_dir = log_dir
+        self._redirects = redirects
+        self._tee = tee
+        self._local_ranks_filter = local_ranks_filter
+
+    @abstractmethod
+    def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest:
+        """
+        Given the environment variables, builds destination of log files for each of the local ranks.
+
+        Envs parameter contains env variables dict for each of the local ranks, where entries are defined in:
+        :func:`~torchelastic.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent._start_workers`.
+        """
+        pass
+
+    @property
+    @abstractmethod
+    def root_log_dir(self) -> str:
+        pass
+
+class DefaultLogsSpecs(LogsSpecs):
+    """
+    Default LogsSpecs implementation:
+
+    - `log_dir` will be created if it doesn't exist
+    - Generates nested folders for each attempt and rank.
+    """
+    def __init__(
+        self,
+        log_dir: Optional[str] = None,
+        redirects: Union[Std, Dict[int, Std]] = Std.NONE,
+        tee: Union[Std, Dict[int, Std]] = Std.NONE,
+        local_ranks_filter: Optional[Set[int]] = None,
+    ) -> None:
+        if log_dir != os.devnull:
+            if not log_dir:
+                log_dir = tempfile.mkdtemp(prefix="torchelastic_")
+            elif not os.path.exists(log_dir):
+                os.makedirs(log_dir)
+            else:
+                if os.path.isfile(log_dir):
+                    raise NotADirectoryError(f"log_dir: {log_dir} is a file")
+        super().__init__(log_dir, redirects, tee, local_ranks_filter)
+        # initialized only once
+        self._run_log_dir = None
+
+    @property
+    def root_log_dir(self) -> str:
+        return str(self._root_log_dir)
+
+    def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str):
+        base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_")
+        os.makedirs(base_log_dir, exist_ok=True)
+        dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir)
+        log.info("log directory set to: %s", dir)
+        return dir
+
+    def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest:
+        """
+        Uses following scheme to build log destination paths:
+
+        - `<log_dir>/<rdzv_run_id>/attempt_<attempt>/<rank>/stdout.log`
+        - `<log_dir>/<rdzv_run_id>/attempt_<attempt>/<rank>/stderr.log`
+        - `<log_dir>/<rdzv_run_id>/attempt_<attempt>/<rank>/error.json`
+        """
+        nprocs = len(envs)
+        global_env = {}  # use only to query properies that are not dependent on a rank
+        if nprocs > 0:
+            global_env = envs[0]
+        else:
+            log.warning("Empty envs map provided when defining logging destinations.")
+        # Keys are always defined, but values can be missing in unit tests
+        run_id = global_env.get("TORCHELASTIC_RUN_ID", "test_run_id")
+        restart_count = global_env.get("TORCHELASTIC_RESTART_COUNT", "0")
+
+        attempt_log_dir: str = ""
+        if self._root_log_dir != os.devnull:
+            if not self._run_log_dir:
+                self._run_log_dir = self._make_log_dir(self._root_log_dir, run_id)
+
+            attempt_log_dir = os.path.join(self._run_log_dir, f"attempt_{restart_count}")  # type: ignore[call-overload]
+            shutil.rmtree(attempt_log_dir, ignore_errors=True)
+            os.makedirs(attempt_log_dir)
+
+        if self._root_log_dir == os.devnull:
+            attempt_log_dir = os.devnull
+
+        # create subdirs for each local rank in the logs_dir
+        # logs_dir
+        #       |- 0
+        #          |- error.json
+        #          |- stdout.log
+        #          |- stderr.log
+        #       |- ...
+        #       |- (nprocs-1)
+        redirs = to_map(self._redirects, nprocs)
+        ts = to_map(self._tee, nprocs)
+
+        # to tee stdout/stderr we first redirect into a file
+        # then tail -f stdout.log/stderr.log so add tee settings to redirects
+        for local_rank, tee_std in ts.items():
+            redirect_std = redirs[local_rank]
+            redirs[local_rank] = redirect_std | tee_std
+
+        SYS_STREAM = ""  # special case to indicate to output to console
+        stdouts = dict.fromkeys(range(nprocs), SYS_STREAM)
+        stderrs = dict.fromkeys(range(nprocs), SYS_STREAM)
+        tee_stdouts: Dict[int, str] = {}
+        tee_stderrs: Dict[int, str] = {}
+        error_files = {}
+
+        for local_rank in range(nprocs):
+
+            if attempt_log_dir == os.devnull:
+                tee_stdouts[local_rank] = os.devnull
+                tee_stderrs[local_rank] = os.devnull
+                error_files[local_rank] = os.devnull
+                envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = ""
+            else:
+                clogdir = os.path.join(attempt_log_dir, str(local_rank))
+                os.mkdir(clogdir)
+
+                rd = redirs[local_rank]
+                if (rd & Std.OUT) == Std.OUT:
+                    stdouts[local_rank] = os.path.join(clogdir, "stdout.log")
+                if (rd & Std.ERR) == Std.ERR:
+                    stderrs[local_rank] = os.path.join(clogdir, "stderr.log")
|
336 |
+
|
337 |
+
t = ts[local_rank]
|
338 |
+
if t & Std.OUT == Std.OUT:
|
339 |
+
tee_stdouts[local_rank] = stdouts[local_rank]
|
340 |
+
if t & Std.ERR == Std.ERR:
|
341 |
+
tee_stderrs[local_rank] = stderrs[local_rank]
|
342 |
+
|
343 |
+
if self._local_ranks_filter and local_rank not in self._local_ranks_filter:
|
344 |
+
# If stream is tee'd, only write to file, but don't tail
|
345 |
+
if local_rank in tee_stdouts:
|
346 |
+
tee_stdouts.pop(local_rank, None)
|
347 |
+
if local_rank in tee_stderrs:
|
348 |
+
tee_stderrs.pop(local_rank, None)
|
349 |
+
|
350 |
+
# If stream is not redirected, don't print
|
351 |
+
if stdouts[local_rank] == SYS_STREAM:
|
352 |
+
stdouts[local_rank] = os.devnull
|
353 |
+
if stderrs[local_rank] == SYS_STREAM:
|
354 |
+
stderrs[local_rank] = os.devnull
|
355 |
+
|
356 |
+
error_file = os.path.join(clogdir, "error.json")
|
357 |
+
error_files[local_rank] = error_file
|
358 |
+
log.info("Setting worker%s reply file to: %s", local_rank, error_file)
|
359 |
+
envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = error_file
|
360 |
+
|
361 |
+
return LogsDest(stdouts, stderrs, tee_stdouts, tee_stderrs, error_files)
|
362 |
+
|
363 |
+
def __repr__(self) -> str:
|
364 |
+
return (
|
365 |
+
f"DefaultLogsSpecs(root_log_dir={self._root_log_dir}, redirects={self._redirects}, "
|
366 |
+
f"tee={self._tee}, local_ranks_filter={self._local_ranks_filter})"
|
367 |
+
)
|
368 |
+
|
369 |
+
def __eq__(self, other: object) -> bool:
|
370 |
+
if not isinstance(other, DefaultLogsSpecs):
|
371 |
+
return False
|
372 |
+
|
373 |
+
return (
|
374 |
+
self._root_log_dir == other._root_log_dir
|
375 |
+
and self._redirects == other._redirects
|
376 |
+
and self._tee == other._tee
|
377 |
+
and self._local_ranks_filter == other._local_ranks_filter
|
378 |
+
)
|
379 |
+
|
380 |
+
|
381 |
+
@dataclass
class RunProcsResult:
    """
    Results of a completed run of processes started with ``start_processes()``. Returned by ``PContext``.

    Note the following:

    1. All fields are mapped by local rank
    2. ``return_values`` - only populated for functions (not the binaries).
    3. ``stdouts`` - path to stdout.log (empty string if no redirect)
    4. ``stderrs`` - path to stderr.log (empty string if no redirect)

    """

    return_values: Dict[int, Any] = field(default_factory=dict)
    failures: Dict[int, ProcessFailure] = field(default_factory=dict)
    stdouts: Dict[int, str] = field(default_factory=dict)
    stderrs: Dict[int, str] = field(default_factory=dict)

    def is_failed(self) -> bool:
        return len(self.failures) > 0


class PContext(abc.ABC):
    """
    The base class that standardizes operations over a set of processes that are launched via different mechanisms.

    The name ``PContext`` is intentional to disambiguate with ``torch.multiprocessing.ProcessContext``.

    .. warning:: stdouts and stderrs should ALWAYS be a superset of
                 tee_stdouts and tee_stderrs (respectively) this is b/c
                 tee is implemented as a redirect + tail -f <stdout/stderr.log>
    """

    def __init__(
        self,
        name: str,
        entrypoint: Union[Callable, str],
        args: Dict[int, Tuple],
        envs: Dict[int, Dict[str, str]],
        logs_specs: LogsSpecs,
        log_line_prefixes: Optional[Dict[int, str]] = None,

    ):
        self.name = name
        # validate that all mappings have the same number of keys and
        # all local ranks are accounted for
        nprocs = len(args)

        # TODO log_line_prefixes can be expanded too
        logs_dest = logs_specs.reify(envs)

        _validate_full_rank(logs_dest.stdouts, nprocs, "stdouts")
        _validate_full_rank(logs_dest.stderrs, nprocs, "stderrs")

        self.entrypoint = entrypoint
        self.args = args
        self.envs = envs
        self.stdouts = logs_dest.stdouts
        self.stderrs = logs_dest.stderrs
        self.error_files = logs_dest.error_files
        self.nprocs = nprocs

        self._stdout_tail = TailLog(name, logs_dest.tee_stdouts, sys.stdout, log_line_prefixes)
        self._stderr_tail = TailLog(name, logs_dest.tee_stderrs, sys.stderr, log_line_prefixes)

    def start(self) -> None:
        """Start processes using parameters defined in the constructor."""
        signal.signal(signal.SIGTERM, _terminate_process_handler)
        signal.signal(signal.SIGINT, _terminate_process_handler)
        if not IS_WINDOWS:
            signal.signal(signal.SIGHUP, _terminate_process_handler)
            signal.signal(signal.SIGQUIT, _terminate_process_handler)
        self._start()
        self._stdout_tail.start()
        self._stderr_tail.start()

    @abc.abstractmethod
    def _start(self) -> None:
        """Start processes using strategy defined in a particular context."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _poll(self) -> Optional[RunProcsResult]:
        """
        Poll the run status of the processes running under this context.
        This method follows an "all-or-nothing" policy and returns
        a ``RunProcessResults`` object if either all processes complete
        successfully or any process fails. Returns ``None`` if
        all processes are still running.
        """
        raise NotImplementedError()

    def wait(self, timeout: float = -1, period: float = 1) -> Optional[RunProcsResult]:
        """
        Wait for the specified ``timeout`` seconds, polling every ``period`` seconds
        for the processes to be done. Returns ``None`` if the processes are still running
        on timeout expiry. Negative timeout values are interpreted as "wait-forever".
        A timeout value of zero simply queries the status of the processes (e.g. equivalent
        to a poll).

        .. note:: Multiprocessing library registers SIGTERM and SIGINT signal handlers that raise
                  ``SignalException`` when the signals are received. It is up to the consumer of the code
                  to properly handle the exception. It is important not to swallow the exception otherwise
                  the process would not terminate. Example of the typical workflow can be:

        .. code-block:: python

            pc = start_processes(...)
            try:
                pc.wait(1)
                .. do some other work
            except SignalException as e:
                pc.shutdown(e.sigval, timeout=30)

        If SIGTERM or SIGINT occurs, the code above will try to shutdown child processes by propagating
        the received signal. If child processes do not terminate within the timeout, the process will send
        SIGKILL.
        """
        if timeout == 0:
            return self._poll()

        if timeout < 0:
            timeout = sys.maxsize

        expiry = time.time() + timeout
        while time.time() < expiry:
            pr = self._poll()
            if pr:
                return pr
            time.sleep(period)

        return None

    @abc.abstractmethod
    def pids(self) -> Dict[int, int]:
        """Return pids of processes mapped by their respective local_ranks."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
        r"""
        Terminates all processes managed by this context and cleans up any
        meta resources (e.g. redirect, error_file files).
        """
        raise NotImplementedError()

    def close(
        self, death_sig: Optional[signal.Signals] = None, timeout: int = 30
    ) -> None:
        r"""
        Terminates all processes managed by this context and cleans up any
        meta resources (e.g. redirect, error_file files).

        Args:
            death_sig: Death signal to terminate processes.
            timeout: Time to wait for processes to finish, if process is
                still alive after this time, it will be terminated via SIGKILL.
        """
        if not death_sig:
            death_sig = _get_default_signal()
        self._close(death_sig=death_sig, timeout=timeout)
        if self._stdout_tail:
            self._stdout_tail.stop()
        if self._stderr_tail:
            self._stderr_tail.stop()


def get_std_cm(std_rd: str, redirect_fn):
    if IS_WINDOWS or IS_MACOS or not std_rd:
        return nullcontext()
    else:
        return redirect_fn(std_rd)


def _wrap(
    local_rank: int,
    fn: Callable,
    args: Dict[int, Tuple],
    envs: Dict[int, Dict[str, str]],
    stdout_redirects: Dict[int, str],  # redirect file for stdout (to console if None)
    stderr_redirects: Dict[int, str],  # redirect file for stderr (to console if None)
    ret_vals: Dict[int, mp.SimpleQueue],
    queue_finished_reading_event: synchronize.Event,
) -> None:
    # get the per-rank params up front so we fail fast if no mapping is found
    args_ = args[local_rank]
    env_ = envs[local_rank]
    ret_val_ = ret_vals[local_rank]

    stdout_rd = stdout_redirects[local_rank]
    stderr_rd = stderr_redirects[local_rank]

    stdout_cm = get_std_cm(stdout_rd, redirect_stdout)
    stderr_cm = get_std_cm(stderr_rd, redirect_stderr)

    for k, v in env_.items():
        os.environ[k] = v

    with stdout_cm, stderr_cm:
        ret = record(fn)(*args_)
    ret_val_.put(ret)
    queue_finished_reading_event.wait()


class MultiprocessContext(PContext):
    """``PContext`` holding worker processes invoked as a function."""

    def __init__(
        self,
        name: str,
        entrypoint: Callable,
        args: Dict[int, Tuple],
        envs: Dict[int, Dict[str, str]],
        start_method: str,
        logs_specs: LogsSpecs,
        log_line_prefixes: Optional[Dict[int, str]] = None,
    ):
        super().__init__(
            name,
            entrypoint,
            args,
            envs,
            logs_specs,
            log_line_prefixes,
        )

        self.start_method = start_method
        # each ret_val queue will always contain a single element.
        self._ret_vals = {
            local_rank: mp.get_context(self.start_method).SimpleQueue()
            for local_rank in range(self.nprocs)
        }

        # see comments in ``join()`` for what this is
        self._return_values: Dict[int, Any] = {}
        self._pc: Optional[mp.ProcessContext] = None
        # Note: set method should ONLY be invoked for the use case when all processes finished
        # successfully. If any process died on event.wait() calling set() method will deadlock.
        self._worker_finished_event = mp.get_context(self.start_method).Event()

    def _start(self):
        if self._pc:
            raise ValueError(
                "The process context already initialized."
                " Most likely the start method got called twice."
            )
        self._pc = mp.start_processes(
            fn=_wrap,
            args=(
                self.entrypoint,
                self.args,
                self.envs,
                self.stdouts,
                self.stderrs,
                self._ret_vals,
                self._worker_finished_event,
            ),
            nprocs=self.nprocs,
            join=False,
            daemon=False,
            start_method=self.start_method,
        )

    def _is_done(self) -> bool:
        return len(self._return_values) == self.nprocs

    def _poll(self) -> Optional[RunProcsResult]:
        assert self._pc is not None  # assertion for mypy type checker

        try:
            # torch.mp.ProcessContext throws an Exception if some/all of
            # worker processes failed
            # timeout < 0 checks worker status and return immediately
            # Join will never return success since we use synchronize.Event to wait
            # for all processes to finish.
            self._pc.join(-1)

            # IMPORTANT: we use multiprocessing.Queue to carry worker return values
            # back to the parent, the worker process will wait before terminating
            # until all the buffered items are fed by the feeder thread to the underlying
            # pipe. Hence to prevent deadlocks on large return values,
            # we opportunistically try queue.get on each join call
            # See: https://docs.python.org/2/library/multiprocessing.html#all-platforms
            for local_rank in range(0, self.nprocs):
                return_queue = self._ret_vals[local_rank]
                if not return_queue.empty():
                    # save the return values temporarily into a member var
                    self._return_values[local_rank] = return_queue.get()

            if self._is_done():
                # we should ALWAYS have ALL the return values when all the processes are done
                self._worker_finished_event.set()
                # Wait until all processes are finished. At this point workers finished executing
                # user function
                self._pc.join()
                _validate_full_rank(
                    self._return_values, self.nprocs, "return_value queue"
                )
                self.close()
                return RunProcsResult(
                    return_values=self._return_values,
                    stdouts=self.stdouts,
                    stderrs=self.stderrs,
                )
            else:
                return None
        except (mp.ProcessRaisedException, mp.ProcessExitedException) as e:
            failed_local_rank = e.error_index

            # entrypoint for MultiprocessContext will always be a Callable
            fn_name = self.entrypoint.__qualname__  # type: ignore[union-attr]
            failed_proc = self._pc.processes[failed_local_rank]
            error_filepath = self.error_files[failed_local_rank]

            log.exception(
                "failed (exitcode: %s)"
                " local_rank: %s (pid: %s)"
                " of fn: %s (start_method: %s)",
                failed_proc.exitcode,
                failed_local_rank, e.pid,
                fn_name, self.start_method,
            )

            self.close()
            return RunProcsResult(
                failures={
                    failed_local_rank: ProcessFailure(
                        local_rank=failed_local_rank,
                        pid=e.pid,
                        exitcode=failed_proc.exitcode,
                        error_file=error_filepath,
                    )
                },
                stdouts=self.stdouts,
                stderrs=self.stderrs,
            )

    def pids(self) -> Dict[int, int]:
        assert self._pc is not None  # assertion for mypy type checking
        return dict(enumerate(self._pc.pids()))

    def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
        if not self._pc:
            return
        for proc in self._pc.processes:
            if proc.is_alive():
                log.warning("Closing process %s via signal %s", proc.pid, death_sig.name)
                try:
                    os.kill(proc.pid, death_sig)
                except ProcessLookupError:
                    # If the process exited because of some reason,
                    # `ProcessLookupError` will be raised, it is safe to ignore it.
                    pass
        end = time.monotonic() + timeout
        for proc in self._pc.processes:
            time_to_wait = end - time.monotonic()
            if time_to_wait <= 0:
                break
            proc.join(time_to_wait)
        for proc in self._pc.processes:
            if proc.is_alive():
                log.warning(
                    "Unable to shutdown process %s via %s, forcefully exiting via %s",
                    proc.pid, death_sig, _get_kill_signal()
                )
                try:
                    os.kill(proc.pid, _get_kill_signal())
                except ProcessLookupError:
                    # If the process exited because of some reason,
                    # `ProcessLookupError` will be raised, it is safe to ignore it.
                    pass
            proc.join()


class SubprocessContext(PContext):
    """``PContext`` holding worker processes invoked as a binary."""

    def __init__(
        self,
        name: str,
        entrypoint: str,
        args: Dict[int, Tuple],
        envs: Dict[int, Dict[str, str]],
        logs_specs: LogsSpecs,
        log_line_prefixes: Optional[Dict[int, str]] = None,

    ):
        super().__init__(
            name,
            entrypoint,
            args,
            envs,
            logs_specs,
            log_line_prefixes,
        )

        # state vector; _vdone[local_rank] -> is local_rank finished or not
        self._running_local_ranks: Set[int] = set(range(self.nprocs))
        self._failures: Dict[int, ProcessFailure] = {}
        self.subprocess_handlers: Dict[int, SubprocessHandler] = {}

    def _start(self):
        if self.subprocess_handlers:
            raise ValueError(
                "The subprocess handlers already initialized. Most likely the start method got called twice."
            )
        self.subprocess_handlers = {
            local_rank: get_subprocess_handler(
                entrypoint=self.entrypoint,  # type: ignore[arg-type] # entrypoint is always a str
                args=self.args[local_rank],
                env=self.envs[local_rank],
                stdout=self.stdouts[local_rank],
                stderr=self.stderrs[local_rank],
                local_rank_id=local_rank,
            )
            for local_rank in range(self.nprocs)
        }

    def _poll(self) -> Optional[RunProcsResult]:
        done_local_ranks = set()
        for local_rank in self._running_local_ranks:
            handler = self.subprocess_handlers[local_rank]
            exitcode = handler.proc.poll()
            if exitcode is not None:
                done_local_ranks.add(local_rank)
                if exitcode != 0:  # failed or signaled
                    self._failures[local_rank] = ProcessFailure(
                        local_rank=local_rank,
                        pid=handler.proc.pid,
                        exitcode=exitcode,
                        error_file=self.error_files[local_rank],
                    )
                # else: --> succeeded; nothing to do

        self._running_local_ranks.difference_update(done_local_ranks)

        # if ALL procs are finished or ANY have failed
        if not self._running_local_ranks or self._failures:
            self.close()  # terminate all running procs
            result = RunProcsResult(
                failures=self._failures,
                stdouts=self.stdouts,
                stderrs=self.stderrs,
            )
            if result.is_failed():
                first_failure = min(result.failures.values(), key=lambda f: f.timestamp)
                log.error(
                    "failed (exitcode: %s)"
                    " local_rank: %s (pid: %s)"
                    " of binary: %s",
                    first_failure.exitcode, first_failure.local_rank, first_failure.pid, self.entrypoint
                )
            else:
                # Populate return with dummy values. This provides consistency with MultiprocessingHandler
                result.return_values = dict.fromkeys(range(self.nprocs))

            return result
        else:  # there are no failures and procs still running
            return None

    def pids(self) -> Dict[int, int]:
        return {
            local_rank: sh.proc.pid
            for local_rank, sh in self.subprocess_handlers.items()
        }

    def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
        if not self.subprocess_handlers:
            return
        for handler in self.subprocess_handlers.values():
            if handler.proc.poll() is None:
                log.warning(
                    "Sending process %s closing signal %s", handler.proc.pid, death_sig.name
                )
                handler.close(death_sig=death_sig)
        end = time.monotonic() + timeout
        for handler in self.subprocess_handlers.values():
            time_to_wait = end - time.monotonic()
            if time_to_wait <= 0:
                break
            try:
                handler.proc.wait(time_to_wait)
            except subprocess.TimeoutExpired:
                # Ignore the timeout expired exception, since
                # the child process will be forcefully terminated via SIGKILL
                pass
        for handler in self.subprocess_handlers.values():
            if handler.proc.poll() is None:
                log.warning(
                    "Unable to shutdown process %s via %s, forcefully exiting via %s",
                    handler.proc.pid, death_sig, _get_kill_signal()
                )
                handler.close(death_sig=_get_kill_signal())
                handler.proc.wait()

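Illustrative sketch (not part of the diff above): a minimal, hypothetical use of the `to_map` helper and `DefaultLogsSpecs.reify` shown in api.py. The log directory, run id, and env values are invented for the example, and the import path assumes the module layout of this package.

# Hypothetical usage sketch for to_map / DefaultLogsSpecs (not part of api.py).
from torch.distributed.elastic.multiprocessing.api import DefaultLogsSpecs, Std, to_map

# Expand a single Std value or a partial mapping to all local ranks.
assert to_map(Std.OUT, local_world_size=2) == {0: Std.OUT, 1: Std.OUT}
assert to_map({1: Std.OUT}, local_world_size=2) == {0: Std.NONE, 1: Std.OUT}

# Build per-rank log destinations; the env values below are illustrative only.
specs = DefaultLogsSpecs(log_dir="/tmp/elastic_logs", redirects=Std.OUT, tee=Std.OUT)
envs = {
    0: {"TORCHELASTIC_RUN_ID": "demo_run", "TORCHELASTIC_RESTART_COUNT": "0"},
    1: {"TORCHELASTIC_RUN_ID": "demo_run", "TORCHELASTIC_RESTART_COUNT": "0"},
}
dest = specs.reify(envs)
# dest.stdouts[rank] -> <log_dir>/<run_id>_*/attempt_0/<rank>/stdout.log
# reify also sets TORCHELASTIC_ERROR_FILE in each rank's env dict.
print(dest.stdouts, dest.error_files)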
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py
ADDED
@@ -0,0 +1,375 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
Each host in a distributed PyTorch job runs with a single TorchElastic agent,
and multiple workers (as children processes of the TorchElastic agent).
Since the workers are user-provided (your PyTorch script/job), TorchElastic
has a way to propagate errors on the trainers through the agent and up to the
scheduler, which ultimately informs the end-user about the state of the job
and applies any retry policies.

TorchElastic categorizes errors into 3 categories:

+----------------+----------------+--------------------------------------------------------------+
| Category       | Sub-Category   |  Description                                                 |
+================+================+==============================================================+
| User Error     | Input Error    | invalid inputs to TorchElastic APIs (e.g. min > max nodes)   |
|                +----------------+--------------------------------------------------------------+
|                | Worker Failure | any failures on the worker child process                     |
+----------------+----------------+--------------------------------------------------------------+
| Platform Error |      n/a       | failures caused by the agent                                 |
+----------------+----------------+--------------------------------------------------------------+
| Infra Error    |      n/a       | failures outside the domain of the agent and workers         |
|                |                | (e.g. host failures)                                         |
+----------------+----------------+--------------------------------------------------------------+

All errors other than "Worker Failure" are either raised canonically from the
agent process or implicitly or explicitly crash the agent process. So the
standard language (python) provided exception handling strategies apply.

Worker Failures are special because the exception/failure originates on a different
process from the agent so the error needs to be propagated inter-process
(e.g. the agent cannot simply ``try-catch`` an exception raised on the worker process).

TorchElastic agents use :func:`torch.distributed.elastic.multiprocessing.start_processes`
to launch the workers which has a simple file based inter-process error propagation
built-in.

Any function or binary entrypoint decorated with :func:`record`
will write uncaught exceptions (with the trace information) to a file specified by the
environment variable ``TORCHELASTIC_ERROR_FILE``. The parent process (e.g. agent)
sets this env var on each child it launches, then aggregates the error files for all
children, and propagates the one with the **smallest** timestamp (e.g. the **first** error).
"""

import json
import os
import signal
import socket
import time
import warnings
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
from string import Template
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar

from torch.distributed.elastic.utils.logging import get_logger

from .error_handler import ErrorHandler  # noqa: F401
from .handlers import get_error_handler  # noqa: F401

__all__ = ["ProcessFailure", "ChildFailedError", "record", "ErrorHandler", "get_error_handler"]

log = get_logger(__name__)


JSON = Dict

_EMPTY_ERROR_DATA = {"message": "<NONE>"}
_NOT_AVAILABLE = "<N/A>"

T = TypeVar("T")


@dataclass
class ProcessFailure:
    """
    Represent the failed process result. When the worker process fails, it may record failure root cause into the file.

    Tries to read the failure timestamp from the provided ``error_file``,
    if the ``error_file`` does not exist, the timestamp is the current
    timestamp (seconds since epoch).

    The ``message`` field is a concise explanation of the failure. If
    the error file exists then the message is obtained from the error file.
    Otherwise one is generated based on the failure signature.

    .. note:: It is assumed that the ``error_file`` is written by
       ``torch.distributed.elastic.multiprocessing.errors.error_handler.ErrorHandler``.
       Otherwise the behavior is undefined.

    """

    local_rank: int
    pid: int
    exitcode: int
    error_file: str
    error_file_data: JSON = field(init=False)
    message: str = field(init=False)
    timestamp: int = field(init=False)

    def __post_init__(self):
        self.error_file_data = _EMPTY_ERROR_DATA
        if os.path.isfile(self.error_file):
            try:
                with open(self.error_file) as fp:
                    self.error_file_data = json.load(fp)
                    log.debug(
                        "User process failed with error data: %s", json.dumps(self.error_file_data, indent=2)
                    )
                    self.message, self.timestamp = self._get_error_data(
                        self.error_file_data
                    )
            except Exception:
                log.exception("Failed to parse reply file: %s", self.error_file)
                raise
        else:
            self._set_no_reply_file()

        # make up an informative message if not already present
        if not self.message:
            # signals typically do not generate an error file message
            if self.exitcode < 0:
                self.message = (
                    f"Signal {-self.exitcode} ({self.signal_name()})"
                    f" received by PID {self.pid}"
                )
            else:
                self.message = "To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html"

    def _get_error_data(self, error_file_data: Dict[str, Any]) -> Tuple[str, int]:
        message = error_file_data["message"]
        if isinstance(message, str):
            timestamp = int(error_file_data.get("timestamp", 0))
        else:
            timestamp = int(message["extraInfo"]["timestamp"])
        return (message, timestamp)

    def _set_no_reply_file(self):
        self.error_file = _NOT_AVAILABLE
        self.error_file_data = _EMPTY_ERROR_DATA
        self.message = ""
        self.timestamp = int(time.time())

    def signal_name(self) -> str:
        if self.exitcode < 0:
            # We don't want to kill the parent process trying to find the signal name.
            # if the signal doesn't map to a known name, use not available.
            try:
                return signal.Signals(-self.exitcode).name
            except Exception:
                return _NOT_AVAILABLE
        else:
            return _NOT_AVAILABLE

    def timestamp_isoformat(self):
        """Return timestamp in ISO format (YYYY-MM-DD_HH:MM:SS)."""
        return datetime.fromtimestamp(self.timestamp).isoformat(sep="_")


GlobalRank = int

_FAILURE_FORMAT_TEMPLATE = """[${idx}]:
  time      : ${time}
  host      : ${hostname}
  rank      : ${rank} (local_rank: ${local_rank})
  exitcode  : ${exitcode} (pid: ${pid})
  error_file: ${error_file}
  traceback : ${message}"""

# extra new lines before and after are intentional
_MSG_FORMAT_TEMPLATE = """
${boarder}
${title}
${section}
Failures:
${other_failures}
${section}
Root Cause (first observed failure):
${root_failure}
${boarder}"""


class ChildFailedError(Exception):
    """
    Special exception type that can be raised from a function annotated with the
    ``@record`` decorator to have the child process' (root exception) propagate
    up the stack as-is (e.g. without being wrapped in the parent's traceback).

    Useful in cases where the parent is a simple nanny process
    and the child (worker) processes are actually doing meaningful compute.
    In this case, errors typically occur on the child process as the parent
    is not doing anything non-trivial, and child errors should be propagated
    to the scheduler for accurate root cause diagnostics.

    .. note:: The propagation relies on error files rather than exception handling to
       support both function and binary launches.

    Example:
    ::

        # process tree on a host (container)
        0: scheduler-init-process:
                   |- 1: torchelastic_agent:
                            |- 2: trainer_0 (ok)
                            |- 3: trainer_1 (fail) -> error.json
                            |- ...
                            |- n+2: trainer_n (ok)
                   |- n+3: other processes
                   |- ...

    In the example above, trainer 1's failure (written into error.json) is
    the root cause and should be reported to the scheduler's init process.
    The torchelastic agent raises a ``ChildFailedError("trainer", {1: "trainer_1/error.json"})``
    upon detecting trainer 1's failure which would propagate the contents
    of trainer 1's error file to the scheduler's init process.
    """

    def __init__(self, name: str, failures: Dict[GlobalRank, ProcessFailure]):
        self.name = name
        self.failures = failures
        assert (
            self.failures
        )  # does not make sense to create a ChildFailedError with no failures
        super().__init__(self.format_msg())

    def get_first_failure(self) -> Tuple[GlobalRank, ProcessFailure]:
        rank = min(self.failures.keys(), key=lambda r: self.failures[r].timestamp)
        return rank, self.failures[rank]

    def format_msg(self, boarder_delim="=", section_delim="-"):
        title = f"{self.name} FAILED"
        root_rank, root_failure = self.get_first_failure()

        root_failure_fmt: str = ""
        other_failures_fmt: List[str] = []
        width = len(title)
        for idx, (rank, failure) in enumerate(self.failures.items()):
            fmt, w = self._format_failure(idx, rank, failure)
            width = max(width, w)
            if rank == root_rank:
                root_failure_fmt = fmt
            else:
                other_failures_fmt.append(fmt)

        # upper boundary on width
        width = min(width, 60)

        return Template(_MSG_FORMAT_TEMPLATE).substitute(
            boarder=boarder_delim * width,
            title=title,
            section=section_delim * width,
            root_failure=root_failure_fmt,
            other_failures="\n".join(other_failures_fmt or [" <NO_OTHER_FAILURES>"]),
        )

    def _format_failure(
        self, idx: int, rank: int, failure: ProcessFailure
    ) -> Tuple[str, int]:

        # failure.message is either a str (when the failure does not generate a traceback - e.g. signals)
        # or a dict (json) of the form
        # {"message": $ERROR_MSG, "extraInfo": {"py_callstack": $TRACEBACK, timestamp: $TS}}
        # so the display logic is:
        # 1. if failure.message is not a dict (it is a str) just show it as is
        # 2. else try to get the traceback (py_callstack)
        # 3.      if the traceback is not there, use the message
        # 4.      if the message is not there show <N/A>
        msg = failure.message
        if isinstance(failure.message, dict):
            msg = (
                failure.message.get("extraInfo", {})
                .get("py_callstack", failure.message.get("message", "<N/A>"))
                .replace("\n", "\n  ")  # to properly indent the traceback
            )

        fmt = Template(_FAILURE_FORMAT_TEMPLATE).substitute(
            idx=idx,
            time=failure.timestamp_isoformat(),
            hostname=socket.getfqdn(),
            rank=rank,
            local_rank=failure.local_rank,
            exitcode=failure.exitcode,
            pid=failure.pid,
            error_file=failure.error_file,
            message=msg,
        )
        width = 0
        for line in fmt.split("\n"):
            width = max(width, len(line))
        return fmt, width


def record(
    fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None
) -> Callable[..., T]:
    """
    Syntactic sugar to record errors/exceptions that happened in the decorated
    function using the provided ``error_handler``.

    Using this decorator is equivalent to:

    ::

        error_handler = get_error_handler()
        error_handler.initialize()
        try:
            foobar()
        except ChildFailedError as e:
            _, failure = e.get_first_failure()
            error_handler.dump_error_file(failure.error_file, failure.exitcode)
            raise
        except Exception as e:
            error_handler.record(e)
            raise

    .. important:: use this decorator once per process at the top level method,
       typically this is the main method.

    Example

    ::

        @record
        def main():
            pass

        if __name__=="__main__":
            main()

    """
    if not error_handler:
        error_handler = get_error_handler()

    def wrap(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            assert error_handler is not None  # assertion for mypy type checker
            error_handler.initialize()
            try:
                return f(*args, **kwargs)
            except SystemExit as se:
                # For run_path based entrypoints, SystemExit with code = 0 will never exit.
                # Handling it here by returning a value:
                if se.code == 0:
                    return None
                else:
                    raise
            except ChildFailedError as e:
                rank, failure = e.get_first_failure()
                if failure.error_file != _NOT_AVAILABLE:
                    error_handler.dump_error_file(failure.error_file, failure.exitcode)
                else:
                    log.info(
                        (
                            "local_rank %s FAILED with no error file."
                            " Decorate your entrypoint fn with @record for traceback info."
                            " See: https://pytorch.org/docs/stable/elastic/errors.html",
                            rank
                        )
                    )
                raise
            except Exception as e:
                error_handler.record_exception(e)
                raise

        return wrapper

    return wrap(fn)
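Illustrative sketch (not part of the diff above): a hypothetical reply file in the {"message": {"message": ..., "extraInfo": ...}} shape described in `_format_failure`, written to a throwaway temp file and parsed back through `ProcessFailure`. The error text and pid are made up.

# Hypothetical sketch: round-trip an error payload through ProcessFailure.
import json
import tempfile
import time
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure

payload = {
    "message": {
        "message": "RuntimeError: boom",
        "extraInfo": {"py_callstack": "Traceback ...", "timestamp": str(int(time.time()))},
    }
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fp:
    json.dump(payload, fp)
    error_file = fp.name

# ProcessFailure reads message and timestamp back out of the error file.
failure = ProcessFailure(local_rank=0, pid=1234, exitcode=1, error_file=error_file)
print(failure.message, failure.timestamp)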
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (12.5 kB)

venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc
ADDED
Binary file (5.2 kB)

venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc
ADDED
Binary file (446 Bytes)

venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py
ADDED
@@ -0,0 +1,159 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import faulthandler
import json
import logging
import os
import time
import traceback
import warnings
from typing import Any, Dict, Optional

__all__ = ['ErrorHandler']

log = logging.getLogger(__name__)


class ErrorHandler:
    """
    Write the provided exception object along with some other metadata about
    the error in a structured way in JSON format to an error file specified by the
    environment variable: ``TORCHELASTIC_ERROR_FILE``. If this environment
    variable is not set, then simply logs the contents of what would have been
    written to the error file.

    This handler may be subclassed to customize the handling of the error.
    Subclasses should override ``initialize()`` and ``record_exception()``.
    """

    def _get_error_file_path(self) -> Optional[str]:
        """
        Return the error file path.

        May return ``None`` to have the structured error be logged only.
        """
        return os.environ.get("TORCHELASTIC_ERROR_FILE", None)

    def initialize(self) -> None:
        """
        Call prior to running code that we wish to capture errors/exceptions.

        Typically registers signal/fault handlers. Users can override this
        function to add custom initialization/registrations that aid in
        propagation/information of errors/signals/exceptions/faults.
        """
        try:
            faulthandler.enable(all_threads=True)
        except Exception as e:
            warnings.warn(f"Unable to enable fault handler. {type(e).__name__}: {e}")

    def _write_error_file(self, file_path: str, error_msg: str) -> None:
        """Write error message to the file."""
        try:
            with open(file_path, "w") as fp:
                fp.write(error_msg)
        except Exception as e:
            warnings.warn(f"Unable to write error to file. {type(e).__name__}: {e}")

    def record_exception(self, e: BaseException) -> None:
        """
        Write a structured information about the exception into an error file in JSON format.

        If the error file cannot be determined, then logs the content
        that would have been written to the error file.
        """
        file = self._get_error_file_path()
        if file:
            data = {
                "message": {
                    "message": f"{type(e).__name__}: {e}",
                    "extraInfo": {
                        "py_callstack": traceback.format_exc(),
                        "timestamp": str(int(time.time())),
                    },
                }
            }
            with open(file, "w") as fp:
                json.dump(data, fp)

    def override_error_code_in_rootcause_data(
        self,
        rootcause_error_file: str,
        rootcause_error: Dict[str, Any],
        error_code: int = 0,
    ):
        """Modify the rootcause_error read from the file, to correctly set the exit code."""
        if "message" not in rootcause_error:
            log.warning(
                "child error file (%s) does not have field `message`. \n"
                "cannot override error code: %s",
                rootcause_error_file, error_code
            )
        elif isinstance(rootcause_error["message"], str):
            log.warning(
                "child error file (%s) has a new message format. \n"
                "skipping error code override",
                rootcause_error_file
            )
        else:
            rootcause_error["message"]["errorCode"] = error_code

    def dump_error_file(self, rootcause_error_file: str, error_code: int = 0):
        """Dump parent error file from child process's root cause error and error code."""
        with open(rootcause_error_file) as fp:
            rootcause_error = json.load(fp)
            # Override error code since the child process cannot capture the error code if it
            # is terminated by signals like SIGSEGV.
            if error_code:
                self.override_error_code_in_rootcause_data(rootcause_error_file, rootcause_error, error_code)
            log.debug(
                "child error file (%s) contents:\n"
                "%s",
                rootcause_error_file, json.dumps(rootcause_error, indent=2)
            )

        my_error_file = self._get_error_file_path()
        if my_error_file:
            # Guard against existing error files
            # This can happen when the child is created using multiprocessing
            # and the same env var (TORCHELASTIC_ERROR_FILE) is used on the
            # parent and child to specify the error files (respectively)
            # because the env vars on the child is set in the wrapper function
            # and by default the child inherits the parent's env vars, if the child
            # process receives a signal before the wrapper function kicks in
            # and the signal handler writes to the error file, then the child
            # will write to the parent's error file. In this case just log the
            # original error file contents and overwrite the error file.
            self._rm(my_error_file)
            self._write_error_file(my_error_file, json.dumps(rootcause_error))
            log.info("dumped error file to parent's %s", my_error_file)
        else:
            log.error(
                "no error file defined for parent, to copy child error file (%s)", rootcause_error_file
            )

    def _rm(self, my_error_file):
        if os.path.isfile(my_error_file):
            # Log the contents of the original file.
            with open(my_error_file) as fp:
                try:
                    original = json.dumps(json.load(fp), indent=2)
                    log.warning(
                        "%s already exists"
                        " and will be overwritten."
                        " Original contents:\n%s",
                        my_error_file, original
                    )
                except json.decoder.JSONDecodeError as err:
                    log.warning(
                        "%s already exists"
                        " and will be overwritten."
                        " Unable to load original contents:\n",
                        my_error_file
                    )
            os.remove(my_error_file)
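Illustrative sketch (not part of the diff above): a hypothetical use of `ErrorHandler.record_exception`, assuming `TORCHELASTIC_ERROR_FILE` points at a scratch path chosen for the example.

# Hypothetical sketch: record an exception into the file named by TORCHELASTIC_ERROR_FILE.
import json
import os
from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler

os.environ["TORCHELASTIC_ERROR_FILE"] = "/tmp/demo_error.json"  # made-up path

handler = ErrorHandler()
handler.initialize()                  # enables faulthandler for the current process
try:
    raise ValueError("demo failure")
except ValueError as e:
    handler.record_exception(e)       # writes {"message": {"message": ..., "extraInfo": ...}}

with open("/tmp/demo_error.json") as fp:
    print(json.dumps(json.load(fp), indent=2))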
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py
ADDED
@@ -0,0 +1,16 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Multiprocessing error-reporting module


from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler

__all__ = ['get_error_handler']

def get_error_handler():
    return ErrorHandler()
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py
ADDED
@@ -0,0 +1,102 @@
# !/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Taken and modified from original source:
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
import ctypes
import logging
import os
import sys
from contextlib import contextmanager
from functools import partial

IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"


logger = logging.getLogger(__name__)


def get_libc():
    if IS_WINDOWS or IS_MACOS:
        logger.warning(
            "NOTE: Redirects are currently not supported in Windows or MacOs."
        )
        return None
    else:
        return ctypes.CDLL("libc.so.6")


libc = get_libc()


def _c_std(stream: str):
    return ctypes.c_void_p.in_dll(libc, stream)


def _python_std(stream: str):
    return {"stdout": sys.stdout, "stderr": sys.stderr}[stream]


_VALID_STD = {"stdout", "stderr"}


@contextmanager
def redirect(std: str, to_file: str):
    """
    Redirect ``std`` (one of ``"stdout"`` or ``"stderr"``) to a file in the path specified by ``to_file``.

    This method redirects the underlying std file descriptor (not just python's ``sys.stdout|stderr``).
    See usage for details.

    Directory of ``dst_filename`` is assumed to exist and the destination file
    is overwritten if it already exists.

    .. note:: Due to buffering cross source writes are not guaranteed to
              appear in wall-clock order. For instance in the example below
              it is possible for the C-outputs to appear before the python
              outputs in the log file.

    Usage:

    ::

     # syntactic-sugar for redirect("stdout", "tmp/stdout.log")
     with redirect_stdout("/tmp/stdout.log"):
        print("python stdouts are redirected")
        libc = ctypes.CDLL("libc.so.6")
        libc.printf(b"c stdouts are also redirected")
        os.system("echo system stdouts are also redirected")

     print("stdout restored")

    """
    if std not in _VALID_STD:
        raise ValueError(
            f"unknown standard stream <{std}>, must be one of {_VALID_STD}"
        )

    c_std = _c_std(std)
    python_std = _python_std(std)
    std_fd = python_std.fileno()

    def _redirect(dst):
        libc.fflush(c_std)
        python_std.flush()
        os.dup2(dst.fileno(), std_fd)

    with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode="w+b") as dst:
        _redirect(dst)
        try:
            yield
        finally:
            _redirect(orig_std)


redirect_stdout = partial(redirect, "stdout")
redirect_stderr = partial(redirect, "stderr")
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py
ADDED
@@ -0,0 +1,15 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.distributed.elastic.multiprocessing.subprocess_handler.handlers import (
    get_subprocess_handler,
)
from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import (
    SubprocessHandler,
)

__all__ = ["SubprocessHandler", "get_subprocess_handler"]
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (494 Bytes)

venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc
ADDED
Binary file (718 Bytes)

venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc
ADDED
Binary file (2.36 kB)

venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py
ADDED
@@ -0,0 +1,32 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Tuple

from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import (
    SubprocessHandler,
)

__all__ = ["get_subprocess_handler"]


def get_subprocess_handler(
    entrypoint: str,
    args: Tuple,
    env: Dict[str, str],
    stdout: str,
    stderr: str,
    local_rank_id: int,
):
    return SubprocessHandler(
        entrypoint=entrypoint,
        args=args,
        env=env,
        stdout=stdout,
        stderr=stderr,
        local_rank_id=local_rank_id,
    )
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py
ADDED
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+import os
+import signal
+import subprocess
+import sys
+
+from typing import Any, Dict, Optional, Tuple
+
+__all__ = ["SubprocessHandler"]
+
+IS_WINDOWS = sys.platform == "win32"
+
+
+def _get_default_signal() -> signal.Signals:
+    """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows."""
+    if IS_WINDOWS:
+        return signal.CTRL_C_EVENT  # type: ignore[attr-defined] # noqa: F821
+    else:
+        return signal.SIGTERM
+
+
+class SubprocessHandler:
+    """
+    Convenience wrapper around python's ``subprocess.Popen``. Keeps track of
+    meta-objects associated to the process (e.g. stdout and stderr redirect fds).
+    """
+
+    def __init__(
+        self,
+        entrypoint: str,
+        args: Tuple,
+        env: Dict[str, str],
+        stdout: str,
+        stderr: str,
+        local_rank_id: int,
+    ):
+        self._stdout = open(stdout, "w") if stdout else None
+        self._stderr = open(stderr, "w") if stderr else None
+        # inherit parent environment vars
+        env_vars = os.environ.copy()
+        env_vars.update(env)
+
+        args_str = (entrypoint, *[str(e) for e in args])
+        self.local_rank_id = local_rank_id
+        self.proc: subprocess.Popen = self._popen(args_str, env_vars)
+
+    def _popen(self, args: Tuple, env: Dict[str, str]) -> subprocess.Popen:
+        kwargs: Dict[str, Any] = {}
+        if not IS_WINDOWS:
+            kwargs["start_new_session"] = True
+        return subprocess.Popen(
+            # pyre-fixme[6]: Expected `Union[typing.Sequence[Union[_PathLike[bytes],
+            #  _PathLike[str], bytes, str]], bytes, str]` for 1st param but got
+            #  `Tuple[str, *Tuple[Any, ...]]`.
+            args=args,
+            env=env,
+            stdout=self._stdout,
+            stderr=self._stderr,
+            **kwargs,
+        )
+
+    def close(self, death_sig: Optional[signal.Signals] = None) -> None:
+        if not death_sig:
+            death_sig = _get_default_signal()
+        if IS_WINDOWS:
+            self.proc.send_signal(death_sig)
+        else:
+            os.killpg(self.proc.pid, death_sig)
+        if self._stdout:
+            self._stdout.close()
+        if self._stderr:
+            self._stderr.close()
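
``SubprocessHandler`` owns the ``Popen`` object, the redirect files, and (on POSIX, via ``start_new_session=True``) the worker's own process group, which is what ``close()`` signals. A rough usage sketch; the entrypoint, paths, and env values below are made up for illustration:

    import signal

    from torch.distributed.elastic.multiprocessing.subprocess_handler import (
        get_subprocess_handler,
    )

    # Launch one long-running worker with stdout/stderr redirected to files
    # (hypothetical command and paths).
    handler = get_subprocess_handler(
        entrypoint="/bin/sleep",
        args=("60",),
        env={"LOCAL_RANK": "0"},
        stdout="/tmp/worker0_stdout.log",
        stderr="/tmp/worker0_stderr.log",
        local_rank_id=0,
    )

    # handler.proc is the underlying subprocess.Popen; close() signals the whole
    # process group (SIGTERM by default on POSIX) and closes the redirect files.
    handler.close(signal.SIGTERM)
    handler.proc.wait()
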
venv/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py
ADDED
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+import os
+import time
+from concurrent.futures._base import Future
+from concurrent.futures.thread import ThreadPoolExecutor
+from threading import Event
+from typing import Dict, List, Optional, TextIO
+
+__all__ = ["tail_logfile", "TailLog"]
+
+log = logging.getLogger(__name__)
+
+
+def tail_logfile(
+    header: str, file: str, dst: TextIO, finished: Event, interval_sec: float
+):
+
+    while not os.path.exists(file):
+        if finished.is_set():
+            return
+        time.sleep(interval_sec)
+
+    with open(file, errors="replace") as fp:
+        while True:
+            line = fp.readline()
+
+            if line:
+                dst.write(f"{header}{line}")
+            else:  # reached EOF
+                if finished.is_set():
+                    # log line producer is finished
+                    break
+                else:
+                    # log line producer is still going
+                    # wait for a bit before looping again
+                    time.sleep(interval_sec)
+
+
+class TailLog:
+    """
+    Tail the given log files.
+
+    The log files do not have to exist when the ``start()`` method is called. The tail-er will gracefully wait until
+    the log files are created by the producer and will tail the contents of the
+    log files until the ``stop()`` method is called.
+
+    .. warning:: ``TailLog`` will wait indefinitely for the log file to be created!
+
+    Each log file's line will be suffixed with a header of the form: ``[{name}{idx}]:``,
+    where the ``name`` is user-provided and ``idx`` is the index of the log file
+    in the ``log_files`` mapping. ``log_line_prefixes`` can be used to override the
+    header for each log file.
+
+    Usage:
+
+    ::
+
+     log_files = {0: "/tmp/0_stdout.log", 1: "/tmp/1_stdout.log"}
+     tailer = TailLog("trainer", log_files, sys.stdout).start()
+     # actually run the trainers to produce 0_stdout.log and 1_stdout.log
+     run_trainers()
+     tailer.stop()
+
+     # once run_trainers() start writing the ##_stdout.log files
+     # the tailer will print to sys.stdout:
+     # >>> [trainer0]:log_line1
+     # >>> [trainer1]:log_line1
+     # >>> [trainer0]:log_line2
+     # >>> [trainer0]:log_line3
+     # >>> [trainer1]:log_line2
+
+    .. note:: Due to buffering log lines between files may not necessarily
+              be printed out in order. You should configure your application's
+              logger to suffix each log line with a proper timestamp.
+
+    """
+
+    def __init__(
+        self,
+        name: str,
+        log_files: Dict[int, str],
+        dst: TextIO,
+        log_line_prefixes: Optional[Dict[int, str]] = None,
+        interval_sec: float = 0.1,
+    ):
+        n = len(log_files)
+        self._threadpool = None
+        if n > 0:
+            self._threadpool = ThreadPoolExecutor(
+                max_workers=n,
+                thread_name_prefix=f"{self.__class__.__qualname__}_{name}",
+            )
+
+        self._name = name
+        self._dst = dst
+        self._log_files = log_files
+        self._log_line_prefixes = log_line_prefixes
+        self._finished_events: Dict[int, Event] = {
+            local_rank: Event() for local_rank in log_files.keys()
+        }
+        self._futs: List[Future] = []
+        self._interval_sec = interval_sec
+        self._stopped = False
+
+    def start(self) -> "TailLog":
+        if not self._threadpool:
+            return self
+
+        for local_rank, file in self._log_files.items():
+            header = f"[{self._name}{local_rank}]:"
+            if self._log_line_prefixes and local_rank in self._log_line_prefixes:
+                header = self._log_line_prefixes[local_rank]
+            self._futs.append(
+                self._threadpool.submit(
+                    tail_logfile,
+                    header=header,
+                    file=file,
+                    dst=self._dst,
+                    finished=self._finished_events[local_rank],
+                    interval_sec=self._interval_sec,
+                )
+            )
+        return self
+
+    def stop(self) -> None:
+        for finished in self._finished_events.values():
+            finished.set()
+
+        for local_rank, f in enumerate(self._futs):
+            try:
+                f.result()
+            except Exception as e:
+                log.error(
+                    "error in log tailor for %s%s. %s: %s",
+                    self._name, local_rank,
+                    e.__class__.__qualname__, e,
+                )
+
+        if self._threadpool:
+            self._threadpool.shutdown(wait=True)
+
+        self._stopped = True
+
+    def stopped(self) -> bool:
+        return self._stopped
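
The ``TailLog`` docstring above already sketches the basic flow; the variant below additionally passes ``log_line_prefixes`` to replace the default ``[{name}{idx}]:`` headers per rank. The file paths and the worker stand-in are illustrative only:

    import sys

    from torch.distributed.elastic.multiprocessing.tail_log import TailLog

    log_files = {0: "/tmp/rank0_stdout.log", 1: "/tmp/rank1_stdout.log"}


    def run_workers():
        # Stand-in for whatever actually launches the workers; here we just
        # append one line per rank so the tailer has something to echo.
        for rank, path in log_files.items():
            with open(path, "a") as f:
                f.write(f"hello from rank {rank}\n")


    tailer = TailLog(
        name="trainer",
        log_files=log_files,
        dst=sys.stdout,
        log_line_prefixes={0: "[rank0]: ", 1: "[rank1]: "},  # overrides "[trainer0]:" etc.
    ).start()

    run_workers()
    tailer.stop()  # sets the finished events, drains the futures, shuts the pool down
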
venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py
ADDED
@@ -0,0 +1,150 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+In the context of Torch Distributed Elastic we use the term *rendezvous* to
+refer to a particular functionality that combines a **distributed
+synchronization** primitive with **peer discovery**.
+
+It is used by Torch Distributed Elastic to gather participants of a training
+job (i.e. nodes) such that they all agree on the same list of participants and
+everyone's roles, as well as make a consistent collective decision on when
+training can begin/resume.
+
+Torch Distributed Elastic rendezvous provides the following critical
+functionalities:
+
+**Barrier**:
+
+Nodes performing rendezvous will all block until the rendezvous is considered
+complete - this happens when at least ``min`` total number of nodes have joined
+the rendezvous barrier (for the same job). This also implies the barrier is not
+necessarily of fixed size.
+
+There's an additional small waiting time after reaching ``min`` number of
+nodes - this is used to ensure the rendezvous is not completed "too quickly"
+(which could potentially exclude additional nodes attempting to join at
+approximately the same time).
+
+If ``max`` number of nodes is gathered at the barrier, the rendezvous is
+completed immediately.
+
+There's also an overall timeout which causes the rendezvous to fail if ``min``
+number of nodes is never reached - this is meant to be a simple fail-safe to
+help release partially allocated job resources, in case there's a problem with
+the resource manager, and is meant to be interpreted as non-retryable.
+
+**Exclusivity**:
+
+A simple distributed barrier would not be sufficient, as we also need to ensure
+that only one group of nodes exists at any given time (for a given job). In
+other words, new nodes (i.e. joining late) should not be able to form a parallel
+independent group of workers for the same job.
+
+Torch Distributed Elastic rendezvous ensures that if a group of nodes has
+already completed a rendezvous (and hence might already be training), then
+additional "late" nodes attempting to rendezvous will only announce themselves
+as waiting, and will have to wait until the (previously completed) existing
+rendezvous is destroyed first.
+
+**Consistency**:
+
+When a rendezvous is completed, all its members will agree on the job membership
+and everyone's role in it. This role is represented using an integer, called
+rank, that is between 0 and world size.
+
+Note that ranks are *not stable*, in the sense that the same node can be
+assigned a different rank in the next (re-)rendezvous.
+
+**Fault-tolerance**:
+
+Torch Distributed Elastic rendezvous is designed to tolerate node failures
+during the rendezvous process. Should a process crash (or lose network
+connectivity, etc), between joining the rendezvous and it being completed, then
+a re-rendezvous with remaining healthy nodes will happen automatically.
+
+A node can also fail *after* it has completed (or *has been observed* by other
+nodes to have completed) the rendezvous - this scenario will be handled by the
+Torch Distributed Elastic ``train_loop`` instead (where it will also trigger a
+re-rendezvous).
+
+**Shared key-value store**:
+
+When the rendezvous is completed, a shared key-value store is created and
+returned. This store implements a ``torch.distributed.Store`` API (see
+`distributed communication docs
+<https://pytorch.org/docs/stable/distributed.html>`__).
+
+This store is only shared by the members of the completed rendezvous. It
+is intended to be used by Torch Distributed Elastic to exchange information
+necessary to initialize job control and data-planes.
+
+**Waiting workers and rendezvous closing**:
+
+Torch Distributed Elastic rendezvous handler object provides additional
+functionalities, which are technically not part of the rendezvous process:
+
+1. Querying how many workers arrived late at the barrier, who can participate in
+   *next* rendezvous.
+
+2. Setting the rendezvous *closed* to signal all nodes not to participate in
+   next rendezvous.
+
+**DynamicRendezvousHandler**:
+
+Torch Distributed Elastic comes with the :py:class:`.DynamicRendezvousHandler`
+class that implements the rendezvous mechanism described above. It is a backend-
+agnostic type that expects a particular :py:class:`.RendezvousBackend` instance
+to be specified during construction.
+
+Torch distributed users can either implement their own backend type or use one
+of the following implementations that come with PyTorch:
+
+- :py:class:`.C10dRendezvousBackend`: Uses a C10d store (by default
+  ``TCPStore``) as the rendezvous backend. The main advantage of using a C10d
+  store is that it requires no 3rd-party dependency (such as etcd) to establish
+  a rendezvous.
+- :py:class:`.EtcdRendezvousBackend`: Supersedes the legacy
+  :py:class:`.EtcdRendezvousHandler` class. Passing an
+  :py:class:`.EtcdRendezvousBackend` instance to
+  :py:class:`.DynamicRendezvousHandler` is functionally equivalent to
+  instantiating an :py:class:`.EtcdRendezvousHandler`.
+
+::
+
+   store = TCPStore("localhost")
+
+   backend = C10dRendezvousBackend(store, "my_run_id")
+
+   rdzv_handler = DynamicRendezvousHandler.from_backend(
+       run_id="my_run_id",
+       store=store,
+       backend=backend,
+       min_nodes=2,
+       max_nodes=4
+   )
+"""
+
+from .api import *  # noqa: F403
+from .registry import _register_default_handlers
+
+
+_register_default_handlers()
+
+
+__all__ = [
+    "RendezvousClosedError",
+    "RendezvousConnectionError",
+    "RendezvousError",
+    "RendezvousGracefulExitError",
+    "RendezvousHandler",
+    "RendezvousHandlerCreator",
+    "RendezvousHandlerRegistry",
+    "RendezvousParameters",
+    "RendezvousStateError",
+    "RendezvousTimeoutError",
+    "rendezvous_handler_registry",
+]
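
The module docstring shows the backend-level ``DynamicRendezvousHandler.from_backend`` path; the ``_register_default_handlers()`` call imported here also wires up named backends such as ``c10d`` so handlers can be created from ``RendezvousParameters``. A hedged sketch of that higher-level path, with a made-up endpoint and run id; the exact return shape of ``next_rendezvous()`` differs across torch releases, so treat this as an illustration rather than a stable contract:

    from torch.distributed.elastic.rendezvous import (
        RendezvousParameters,
        rendezvous_handler_registry,
    )

    # Single-node illustration using the built-in "c10d" backend; endpoint and
    # run_id are hypothetical values.
    params = RendezvousParameters(
        backend="c10d",
        endpoint="localhost:29400",
        run_id="demo_job",
        min_nodes=1,
        max_nodes=1,
    )

    handler = rendezvous_handler_registry.create_handler(params)

    # Blocks until at least min_nodes have joined; the result carries the shared
    # store plus this node's rank and the world size.
    result = handler.next_rendezvous()
    handler.shutdown()
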
venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.77 kB).

venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc
ADDED
Binary file (9.81 kB).

venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc
ADDED
Binary file (7.97 kB).

venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc
ADDED
Binary file (38.3 kB).

venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc
ADDED
Binary file (27.2 kB).

venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc
ADDED
Binary file (6.47 kB).

venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc
ADDED
Binary file (7.89 kB).

venv/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc
ADDED
Binary file (5.34 kB).