Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__init__.py +13 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py +50 -0
- venv/lib/python3.10/site-packages/accelerate/commands/env.py +107 -0
- venv/lib/python3.10/site-packages/accelerate/commands/estimate.py +309 -0
- venv/lib/python3.10/site-packages/accelerate/commands/launch.py +1085 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py +14 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py +65 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py +59 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py +86 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py +133 -0
- venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py +144 -0
- venv/lib/python3.10/site-packages/accelerate/commands/test.py +65 -0
- venv/lib/python3.10/site-packages/accelerate/commands/tpu.py +157 -0
- venv/lib/python3.10/site-packages/accelerate/commands/utils.py +120 -0
- venv/lib/python3.10/site-packages/accelerate/utils/__init__.py +225 -0
ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:165572568fad0d9c431ae19b33aecc84646d487ae264873a81bb8c60b6dcaa17
+size 9372

ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03eefe570affbc919a85497868156d412b8ccfe08eecec7baf6894fb08e4cfa8
+size 9293

ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6223ebd887bd55fb48afd407e325a99e6f87d6c113fdf96694f1b25519b053d8
+size 33555612

ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2994aa83bde7c30048029d27d3081d0959d85cd9039cade33ba72d03907e903b
+size 33555533

ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3599aefe77491a064284602307c5d243e861a55c13ffae66cdd5e21886ff0e66
+size 9387

ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0168178f990f6d01a96d00892354cc058836dfa0a396ca2a0926b77d89481cd2
+size 9293

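All six checkpoint tensors above are committed through Git LFS, so each diff adds only a three-line pointer stub (version, oid, size) rather than the tensor data itself. As a hedged illustration of the pointer format, a minimal parser might look like this (the function name and usage path are illustrative, not part of the repo):

# Minimal sketch: parse a Git LFS pointer file into its key/value fields.
# Each pointer line is "key value", per the git-lfs spec URL shown above.
def read_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# Hypothetical usage against the first pointer in this commit:
# info = read_lfs_pointer("ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg.pt")
# info["oid"] -> "sha256:1655...", info["size"] -> "9372"

The `size` field is the byte count of the real object, which is why the stubs for the 4h MLP weight report roughly 33.5 MB while the layernorm vectors are under 10 KB.
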
venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.17 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc
ADDED
Binary file (108 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc
ADDED
Binary file (23.2 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc
ADDED
Binary file (8.17 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc
ADDED
Binary file (33.4 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc
ADDED
Binary file (22.8 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc
ADDED
Binary file (6.09 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc
ADDED
Binary file (8.05 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc
ADDED
Binary file (3.61 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc
ADDED
Binary file (4.43 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc
ADDED
Binary file (431 Bytes)

venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc
ADDED
Binary file (6.88 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc
ADDED
Binary file (3.35 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc
ADDED
Binary file (39.7 kB)

venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc
ADDED
Binary file (37.3 kB)

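The interpreter-tagged names above (`*.cpython-310.pyc`) follow CPython's bytecode-cache convention, and the standard library exposes the mapping directly, as this small sketch shows:

# Sketch: CPython writes compiled bytecode into __pycache__ using an
# interpreter tag, which is the naming pattern seen in the listing above.
import importlib.util

print(importlib.util.cache_from_source("accelerate/logging.py"))
# On CPython 3.10 this prints: accelerate/__pycache__/logging.cpython-310.pyc
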
venv/lib/python3.10/site-packages/accelerate/commands/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (187 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc
ADDED
|
Binary file (2.87 kB). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc
ADDED
|
Binary file (28.1 kB). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc
ADDED
|
Binary file (1.65 kB). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc
ADDED
|
Binary file (3.85 kB). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.71 kB). View file
|
|
|
venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py
ADDED
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from accelerate.commands.config import get_config_parser
+from accelerate.commands.env import env_command_parser
+from accelerate.commands.estimate import estimate_command_parser
+from accelerate.commands.launch import launch_command_parser
+from accelerate.commands.test import test_command_parser
+from accelerate.commands.tpu import tpu_command_parser
+from accelerate.commands.utils import CustomArgumentParser
+
+
+def main():
+    parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
+    subparsers = parser.add_subparsers(help="accelerate command helpers")
+
+    # Register commands
+    get_config_parser(subparsers=subparsers)
+    estimate_command_parser(subparsers=subparsers)
+    env_command_parser(subparsers=subparsers)
+    launch_command_parser(subparsers=subparsers)
+    tpu_command_parser(subparsers=subparsers)
+    test_command_parser(subparsers=subparsers)
+
+    # Let's go
+    args = parser.parse_args()
+
+    if not hasattr(args, "func"):
+        parser.print_help()
+        exit(1)
+
+    # Run
+    args.func(args)
+
+
+if __name__ == "__main__":
+    main()

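The `main()` above relies on the standard argparse subcommand pattern: each `*_command_parser` registers its handler via `set_defaults(func=...)`, and the top level dispatches through `args.func`. A self-contained sketch of that pattern, with a hypothetical `hello` subcommand standing in for the real ones:

# Standalone sketch of the set_defaults(func=...) dispatch used by the CLI above.
import argparse


def hello_command(args):
    print(f"hello, {args.name}")


def main():
    parser = argparse.ArgumentParser("demo", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="demo command helpers")

    hello = subparsers.add_parser("hello")
    hello.add_argument("name")
    hello.set_defaults(func=hello_command)

    args = parser.parse_args()
    # With no subcommand given, `args` carries no `func`; show help instead.
    if not hasattr(args, "func"):
        parser.print_help()
        raise SystemExit(1)
    args.func(args)


if __name__ == "__main__":
    main()
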
venv/lib/python3.10/site-packages/accelerate/commands/env.py
ADDED
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import platform
+import subprocess
+
+import numpy as np
+import psutil
+import torch
+
+from accelerate import __version__ as version
+from accelerate.commands.config import default_config_file, load_config_from_file
+
+from ..utils import is_mlu_available, is_npu_available, is_xpu_available
+
+
+def env_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("env")
+    else:
+        parser = argparse.ArgumentParser("Accelerate env command")
+
+    parser.add_argument(
+        "--config_file", default=None, help="The config file to use for the default values in the launching script."
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=env_command)
+    return parser
+
+
+def env_command(args):
+    pt_version = torch.__version__
+    pt_cuda_available = torch.cuda.is_available()
+    pt_xpu_available = is_xpu_available()
+    pt_mlu_available = is_mlu_available()
+    pt_npu_available = is_npu_available()
+
+    accelerate_config = "Not found"
+    # Get the default from the config file.
+    if args.config_file is not None or os.path.isfile(default_config_file):
+        accelerate_config = load_config_from_file(args.config_file).to_dict()
+
+    # if we can run which, get it
+    command = None
+    bash_location = "Not found"
+    if os.name == "nt":
+        command = ["where", "accelerate"]
+    elif os.name == "posix":
+        command = ["which", "accelerate"]
+    if command is not None:
+        bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
+    info = {
+        "`Accelerate` version": version,
+        "Platform": platform.platform(),
+        "`accelerate` bash location": bash_location,
+        "Python version": platform.python_version(),
+        "Numpy version": np.__version__,
+        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
+        "PyTorch XPU available": str(pt_xpu_available),
+        "PyTorch NPU available": str(pt_npu_available),
+        "PyTorch MLU available": str(pt_mlu_available),
+        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
+    }
+    if pt_cuda_available:
+        info["GPU type"] = torch.cuda.get_device_name()
+
+    print("\nCopy-and-paste the text below in your GitHub issue\n")
+    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
+
+    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
+    accelerate_config_str = (
+        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
+        if isinstance(accelerate_config, dict)
+        else f"\t{accelerate_config}"
+    )
+    print(accelerate_config_str)
+
+    info["`Accelerate` configs"] = accelerate_config
+
+    return info
+
+
+def main() -> int:
+    parser = env_command_parser()
+    args = parser.parse_args()
+    env_command(args)
+    return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())

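A small design note on the executable lookup in `env_command`: it shells out to `which` or `where` depending on `os.name`. The standard library's `shutil.which` covers both platforms without a subprocess, as in this hedged alternative sketch (not part of the diff):

# shutil.which searches PATH portably on POSIX and Windows and returns
# None (rather than raising) when the executable is missing.
import shutil

bash_location = shutil.which("accelerate") or "Not found"
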
venv/lib/python3.10/site-packages/accelerate/commands/estimate.py
ADDED
@@ -0,0 +1,309 @@
+#!/usr/bin/env python
+
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from huggingface_hub import model_info
+from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
+
+from accelerate import init_empty_weights
+from accelerate.commands.utils import CustomArgumentParser
+from accelerate.utils import (
+    calculate_maximum_sizes,
+    convert_bytes,
+    is_timm_available,
+    is_transformers_available,
+)
+
+
+if is_transformers_available():
+    import transformers
+    from transformers import AutoConfig, AutoModel
+
+if is_timm_available():
+    import timm
+
+
+def verify_on_hub(repo: str, token: str = None):
+    "Verifies that the model is on the hub and returns the model info."
+    try:
+        return model_info(repo, token=token)
+    except GatedRepoError:
+        return "gated"
+    except RepositoryNotFoundError:
+        return "repo"
+
+
+def check_has_model(error):
+    """
+    Checks what library spawned `error` when a model is not found
+    """
+    if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
+        return "timm"
+    elif (
+        is_transformers_available()
+        and isinstance(error, OSError)
+        and "does not appear to have a file named" in error.args[0]
+    ):
+        return "transformers"
+    else:
+        return "unknown"
+
+
+def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
+    """
+    Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
+
+    Args:
+        model_name (`str`):
+            The model name on the Hub
+        library_name (`str`):
+            The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
+            metadata on the Hub to determine the library.
+        trust_remote_code (`bool`, `optional`, defaults to `False`):
+            Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+            should only be set to `True` for repositories you trust and in which you have read the code, as it will
+            execute code present on the Hub on your local machine.
+        access_token (`str`, `optional`, defaults to `None`):
+            The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
+
+    Returns:
+        `torch.nn.Module`: The torch model that has been initialized on the `meta` device.
+
+    """
+    model_info = verify_on_hub(model_name, access_token)
+    # Simplified errors
+    if model_info == "gated":
+        raise GatedRepoError(
+            f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
+        )
+    elif model_info == "repo":
+        raise RepositoryNotFoundError(
+            f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
+            " make sure you are authenticated via `huggingface-cli login` and have access."
+        )
+    if library_name is None:
+        library_name = getattr(model_info, "library_name", False)
+        if not library_name:
+            raise ValueError(
+                f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
+            )
+    if library_name == "transformers":
+        if not is_transformers_available():
+            raise ImportError(
+                f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
+            )
+        print(f"Loading pretrained config for `{model_name}` from `transformers`...")
+        if model_info.config is None:
+            raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")
+
+        auto_map = model_info.config.get("auto_map", False)
+        config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
+        with init_empty_weights():
+            # remote code could specify a specific `AutoModel` class in the `auto_map`
+            constructor = AutoModel
+            if isinstance(auto_map, dict):
+                value = None
+                for key in auto_map.keys():
+                    if key.startswith("AutoModelFor"):
+                        value = key
+                        break
+                if value is not None:
+                    constructor = getattr(transformers, value)
+            model = constructor.from_config(config, trust_remote_code=trust_remote_code)
+    elif library_name == "timm":
+        if not is_timm_available():
+            raise ImportError(
+                f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
+            )
+        print(f"Loading pretrained config for `{model_name}` from `timm`...")
+        with init_empty_weights():
+            model = timm.create_model(model_name, pretrained=False)
+    else:
+        raise ValueError(
+            f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
+        )
+    return model
+
+
+def create_ascii_table(headers: list, rows: list, title: str):
+    "Creates a pretty table from a list of rows, minimal version of `tabulate`."
+    sep_char, in_between = "│", "─"
+    column_widths = []
+    for i in range(len(headers)):
+        column_values = [row[i] for row in rows] + [headers[i]]
+        max_column_width = max(len(value) for value in column_values)
+        column_widths.append(max_column_width)
+
+    formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
+
+    pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
+    diff = 0
+
+    def make_row(left_char, middle_char, right_char):
+        return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
+
+    separator = make_row("├", "┼", "┤")
+    if len(title) > sum(column_widths):
+        diff = abs(len(title) - len(separator))
+        column_widths[-1] += diff
+
+    # Update with diff
+    separator = make_row("├", "┼", "┤")
+    initial_rows = [
+        make_row("┌", in_between, "┐"),
+        f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
+        make_row("├", "┬", "┤"),
+    ]
+    table = "\n".join(initial_rows) + "\n"
+    column_widths[-1] += diff
+    centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
+    table += f"{pattern % tuple(centered_line)}\n{separator}\n"
+    for i, line in enumerate(rows):
+        centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
+        table += f"{pattern % tuple(centered_line)}\n"
+    table += f'└{"┴".join([in_between * n for n in column_widths])}┘'

+    return table
+
+
+def estimate_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("estimate-memory")
+    else:
+        parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
+
+    parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
+    parser.add_argument(
+        "--library_name",
+        type=str,
+        help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
+        choices=["timm", "transformers"],
+    )
+    parser.add_argument(
+        "--dtypes",
+        type=str,
+        nargs="+",
+        default=["float32", "float16", "int8", "int4"],
+        help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
+        choices=["float32", "float16", "int8", "int4"],
+    )
+    parser.add_argument(
+        "--trust_remote_code",
+        action="store_true",
+        help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
+        should only be used for repositories you trust and in which you have read the code, as it will execute
+        code present on the Hub on your local machine.""",
+        default=False,
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=estimate_command)
+    return parser
+
+
+def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
+    """
+    Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
+    1.
+
+    Args:
+        bytes (`int`):
+            The size of the model being trained.
+        mixed_precision (`str`):
+            The mixed precision that would be ran.
+        msamp_config (`str`):
+            The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
+    """
+    memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
+    fp32_size = bytes
+    fp16_size = bytes // 2
+
+    if mixed_precision == "float32":
+        memory_sizes["model"] = fp32_size
+        memory_sizes["gradients"] = fp32_size
+        memory_sizes["optimizer"] = fp32_size * 2
+        memory_sizes["step"] = fp32_size * 4
+    elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
+        # With native `TransformersEngine`, there is no memory savings with FP8
+        # With mixed precision training, the model has weights stored
+        # in FP16 and FP32
+        memory_sizes["model"] = fp32_size
+        # 1.5 from weight gradient + computation (GEMM)
+        memory_sizes["gradients"] = fp32_size + fp16_size
+        # 2x from optimizer states
+        memory_sizes["optimizer"] = fp32_size * 2  # Optimizer states
+        memory_sizes["step"] = memory_sizes["optimizer"]
+    return memory_sizes
+
+
+def gather_data(args):
+    "Creates an empty model and gathers the data for the sizes"
+    try:
+        model = create_empty_model(
+            args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
+        )
+    except (RuntimeError, OSError) as e:
+        library = check_has_model(e)
+        if library != "unknown":
+            raise RuntimeError(
+                f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
+            )
+        raise e
+
+    total_size, largest_layer = calculate_maximum_sizes(model)
+
+    data = []
+
+    for dtype in args.dtypes:
+        dtype_total_size = total_size
+        dtype_largest_layer = largest_layer[0]
+        dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
+        if dtype == "float16":
+            dtype_total_size /= 2
+            dtype_largest_layer /= 2
+        elif dtype == "int8":
+            dtype_total_size /= 4
+            dtype_largest_layer /= 4
+        elif dtype == "int4":
+            dtype_total_size /= 8
+            dtype_largest_layer /= 8
+        data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
+    return data
+
+
+def estimate_command(args):
+    data = gather_data(args)
+    for row in data:
+        for i, item in enumerate(row):
+            if isinstance(item, (int, float)):
+                row[i] = convert_bytes(item)
+            elif isinstance(item, dict):
+                training_usage = max(item.values())
+                row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"
+
+    headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
+
+    title = f"Memory Usage for loading `{args.model_name}`"
+    table = create_ascii_table(headers, data, title)
+    print(table)
+
+
+def main():
+    parser = estimate_command_parser()
+    args = parser.parse_args()
+    estimate_command(args)
+
+
+if __name__ == "__main__":
+    main()

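To make the sizing logic in `estimate_training_usage` concrete, here is a worked example of its float32 branch for a hypothetical 1B-parameter model (the parameter count is made up for illustration):

# Worked example of the float32 branch in estimate_training_usage above.
params = 1_000_000_000
fp32_size = params * 4  # 4 bytes per fp32 parameter, ~3.7 GiB of weights
memory_sizes = {
    "model": fp32_size,          # the weights themselves
    "gradients": fp32_size,      # one fp32 gradient per weight
    "optimizer": fp32_size * 2,  # Adam's two fp32 moment buffers
    "step": fp32_size * 4,       # peak transient during the optimizer step
}
# estimate_command reports the largest entry as "Training using Adam":
print(f"~{max(memory_sizes.values()) / 1024 ** 3:.1f} GiB")  # ~14.9 GiB
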
venv/lib/python3.10/site-packages/accelerate/commands/launch.py
ADDED
|
@@ -0,0 +1,1085 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
import argparse
|
| 18 |
+
import importlib
|
| 19 |
+
import logging
|
| 20 |
+
import os
|
| 21 |
+
import subprocess
|
| 22 |
+
import sys
|
| 23 |
+
from pathlib import Path
|
| 24 |
+
|
| 25 |
+
import psutil
|
| 26 |
+
import torch
|
| 27 |
+
|
| 28 |
+
from accelerate.commands.config import default_config_file, load_config_from_file
|
| 29 |
+
from accelerate.commands.config.config_args import SageMakerConfig
|
| 30 |
+
from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
|
| 31 |
+
from accelerate.commands.utils import CustomArgumentParser
|
| 32 |
+
from accelerate.state import get_int_from_env
|
| 33 |
+
from accelerate.utils import (
|
| 34 |
+
ComputeEnvironment,
|
| 35 |
+
DistributedType,
|
| 36 |
+
PrepareForLaunch,
|
| 37 |
+
_filter_args,
|
| 38 |
+
check_cuda_p2p_ib_support,
|
| 39 |
+
convert_dict_to_env_variables,
|
| 40 |
+
is_bf16_available,
|
| 41 |
+
is_deepspeed_available,
|
| 42 |
+
is_mlu_available,
|
| 43 |
+
is_npu_available,
|
| 44 |
+
is_rich_available,
|
| 45 |
+
is_sagemaker_available,
|
| 46 |
+
is_torch_version,
|
| 47 |
+
is_torch_xla_available,
|
| 48 |
+
is_xpu_available,
|
| 49 |
+
patch_environment,
|
| 50 |
+
prepare_deepspeed_cmd_env,
|
| 51 |
+
prepare_multi_gpu_env,
|
| 52 |
+
prepare_sagemager_args_inputs,
|
| 53 |
+
prepare_simple_launcher_cmd_env,
|
| 54 |
+
prepare_tpu,
|
| 55 |
+
)
|
| 56 |
+
from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
if is_rich_available():
|
| 60 |
+
from rich import get_console
|
| 61 |
+
from rich.logging import RichHandler
|
| 62 |
+
|
| 63 |
+
FORMAT = "%(message)s"
|
| 64 |
+
logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
logger = logging.getLogger(__name__)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
options_to_group = {
|
| 71 |
+
"multi_gpu": "Distributed GPUs",
|
| 72 |
+
"tpu": "TPU",
|
| 73 |
+
"use_deepspeed": "DeepSpeed Arguments",
|
| 74 |
+
"use_fsdp": "FSDP Arguments",
|
| 75 |
+
"use_megatron_lm": "Megatron-LM Arguments",
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def clean_option(option):
|
| 80 |
+
"Finds all cases of - after the first two characters and changes them to _"
|
| 81 |
+
if option.startswith("--"):
|
| 82 |
+
return option[2:].replace("-", "_")
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class CustomHelpFormatter(argparse.HelpFormatter):
|
| 86 |
+
"""
|
| 87 |
+
This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
|
| 88 |
+
called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
|
| 89 |
+
for that platform.
|
| 90 |
+
"""
|
| 91 |
+
|
| 92 |
+
def __init__(self, *args, **kwargs):
|
| 93 |
+
super().__init__(*args, **kwargs)
|
| 94 |
+
self.titles = [
|
| 95 |
+
"Hardware Selection Arguments",
|
| 96 |
+
"Resource Selection Arguments",
|
| 97 |
+
"Training Paradigm Arguments",
|
| 98 |
+
"positional arguments",
|
| 99 |
+
"optional arguments",
|
| 100 |
+
]
|
| 101 |
+
|
| 102 |
+
def add_argument(self, action: argparse.Action):
|
| 103 |
+
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
|
| 104 |
+
args = sys.argv[2:]
|
| 105 |
+
else:
|
| 106 |
+
args = sys.argv[1:]
|
| 107 |
+
|
| 108 |
+
if len(args) > 1:
|
| 109 |
+
args = list(map(clean_option, args))
|
| 110 |
+
used_platforms = [arg for arg in args if arg in options_to_group.keys()]
|
| 111 |
+
used_titles = [options_to_group[o] for o in used_platforms]
|
| 112 |
+
if action.container.title not in self.titles + used_titles:
|
| 113 |
+
action.help = argparse.SUPPRESS
|
| 114 |
+
elif action.container.title == "Hardware Selection Arguments":
|
| 115 |
+
if set(action.option_strings).isdisjoint(set(args)):
|
| 116 |
+
action.help = argparse.SUPPRESS
|
| 117 |
+
else:
|
| 118 |
+
action.help = action.help + " (currently selected)"
|
| 119 |
+
elif action.container.title == "Training Paradigm Arguments":
|
| 120 |
+
if set(action.option_strings).isdisjoint(set(args)):
|
| 121 |
+
action.help = argparse.SUPPRESS
|
| 122 |
+
else:
|
| 123 |
+
action.help = action.help + " (currently selected)"
|
| 124 |
+
|
| 125 |
+
action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
|
| 126 |
+
super().add_argument(action)
|
| 127 |
+
|
| 128 |
+
def end_section(self):
|
| 129 |
+
if len(self._current_section.items) < 2:
|
| 130 |
+
self._current_section.items = []
|
| 131 |
+
self._current_section.heading = ""
|
| 132 |
+
super().end_section()
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def launch_command_parser(subparsers=None):
|
| 136 |
+
description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
|
| 137 |
+
if subparsers is not None:
|
| 138 |
+
parser = subparsers.add_parser(
|
| 139 |
+
"launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
|
| 140 |
+
)
|
| 141 |
+
else:
|
| 142 |
+
parser = CustomArgumentParser(
|
| 143 |
+
"Accelerate launch command",
|
| 144 |
+
description=description,
|
| 145 |
+
add_help=False,
|
| 146 |
+
allow_abbrev=False,
|
| 147 |
+
formatter_class=CustomHelpFormatter,
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
|
| 151 |
+
|
| 152 |
+
parser.add_argument(
|
| 153 |
+
"--config_file",
|
| 154 |
+
default=None,
|
| 155 |
+
help="The config file to use for the default values in the launching script.",
|
| 156 |
+
)
|
| 157 |
+
parser.add_argument(
|
| 158 |
+
"--quiet",
|
| 159 |
+
"-q",
|
| 160 |
+
action="store_true",
|
| 161 |
+
help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
|
| 162 |
+
)
|
| 163 |
+
# Hardware selection arguments
|
| 164 |
+
hardware_args = parser.add_argument_group(
|
| 165 |
+
"Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
|
| 166 |
+
)
|
| 167 |
+
hardware_args.add_argument(
|
| 168 |
+
"--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
|
| 169 |
+
)
|
| 170 |
+
hardware_args.add_argument(
|
| 171 |
+
"--multi_gpu",
|
| 172 |
+
default=False,
|
| 173 |
+
action="store_true",
|
| 174 |
+
help="Whether or not this should launch a distributed GPU training.",
|
| 175 |
+
)
|
| 176 |
+
hardware_args.add_argument(
|
| 177 |
+
"--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
|
| 178 |
+
)
|
| 179 |
+
hardware_args.add_argument(
|
| 180 |
+
"--ipex",
|
| 181 |
+
default=False,
|
| 182 |
+
action="store_true",
|
| 183 |
+
help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.",
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
# Resource selection arguments
|
| 187 |
+
resource_args = parser.add_argument_group(
|
| 188 |
+
"Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
|
| 189 |
+
)
|
| 190 |
+
resource_args.add_argument(
|
| 191 |
+
"--mixed_precision",
|
| 192 |
+
type=str,
|
| 193 |
+
choices=["no", "fp16", "bf16", "fp8"],
|
| 194 |
+
help="Whether or not to use mixed precision training. "
|
| 195 |
+
"Choose between FP16 and BF16 (bfloat16) training. "
|
| 196 |
+
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
|
| 197 |
+
)
|
| 198 |
+
resource_args.add_argument(
|
| 199 |
+
"--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
|
| 200 |
+
)
|
| 201 |
+
resource_args.add_argument(
|
| 202 |
+
"--num_machines", type=int, default=None, help="The total number of machines used in this training."
|
| 203 |
+
)
|
| 204 |
+
resource_args.add_argument(
|
| 205 |
+
"--num_cpu_threads_per_process",
|
| 206 |
+
type=int,
|
| 207 |
+
default=None,
|
| 208 |
+
help="The number of CPU threads per process. Can be tuned for optimal performance.",
|
| 209 |
+
)
|
| 210 |
+
resource_args.add_argument(
|
| 211 |
+
"--enable_cpu_affinity",
|
| 212 |
+
default=False,
|
| 213 |
+
action="store_true",
|
| 214 |
+
help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
# Dynamo arguments
|
| 218 |
+
resource_args.add_argument(
|
| 219 |
+
"--dynamo_backend",
|
| 220 |
+
type=str,
|
| 221 |
+
choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
|
| 222 |
+
help="Choose a backend to optimize your training with dynamo, see more at "
|
| 223 |
+
"https://github.com/pytorch/torchdynamo.",
|
| 224 |
+
)
|
| 225 |
+
resource_args.add_argument(
|
| 226 |
+
"--dynamo_mode",
|
| 227 |
+
type=str,
|
| 228 |
+
default="default",
|
| 229 |
+
choices=TORCH_DYNAMO_MODES,
|
| 230 |
+
help="Choose a mode to optimize your training with dynamo.",
|
| 231 |
+
)
|
| 232 |
+
resource_args.add_argument(
|
| 233 |
+
"--dynamo_use_fullgraph",
|
| 234 |
+
default=False,
|
| 235 |
+
action="store_true",
|
| 236 |
+
help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
|
| 237 |
+
)
|
| 238 |
+
resource_args.add_argument(
|
| 239 |
+
"--dynamo_use_dynamic",
|
| 240 |
+
default=False,
|
| 241 |
+
action="store_true",
|
| 242 |
+
help="Whether to enable dynamic shape tracing.",
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
# Training Paradigm arguments
|
| 246 |
+
paradigm_args = parser.add_argument_group(
|
| 247 |
+
"Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
|
| 248 |
+
)
|
| 249 |
+
paradigm_args.add_argument(
|
| 250 |
+
"--use_deepspeed",
|
| 251 |
+
default=False,
|
| 252 |
+
action="store_true",
|
| 253 |
+
help="Whether to use deepspeed.",
|
| 254 |
+
)
|
| 255 |
+
paradigm_args.add_argument(
|
| 256 |
+
"--use_fsdp",
|
| 257 |
+
default=False,
|
| 258 |
+
action="store_true",
|
| 259 |
+
help="Whether to use fsdp.",
|
| 260 |
+
)
|
| 261 |
+
paradigm_args.add_argument(
|
| 262 |
+
"--use_megatron_lm",
|
| 263 |
+
default=False,
|
| 264 |
+
action="store_true",
|
| 265 |
+
help="Whether to use Megatron-LM.",
|
| 266 |
+
)
|
| 267 |
+
paradigm_args.add_argument(
|
| 268 |
+
"--use_xpu",
|
| 269 |
+
default=False,
|
| 270 |
+
action="store_true",
|
| 271 |
+
help="Whether to use IPEX plugin to speed up training on XPU specifically.",
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
# distributed GPU training arguments
|
| 275 |
+
distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
|
| 276 |
+
distributed_args.add_argument(
|
| 277 |
+
"--gpu_ids",
|
| 278 |
+
default=None,
|
| 279 |
+
help="What GPUs (by id) should be used for training on this machine as a comma-seperated list",
|
| 280 |
+
)
|
| 281 |
+
distributed_args.add_argument(
|
| 282 |
+
"--same_network",
|
| 283 |
+
default=False,
|
| 284 |
+
action="store_true",
|
| 285 |
+
help="Whether all machines used for multinode training exist on the same local network.",
|
| 286 |
+
)
|
| 287 |
+
distributed_args.add_argument(
|
| 288 |
+
"--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
|
| 289 |
+
)
|
| 290 |
+
distributed_args.add_argument(
|
| 291 |
+
"--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
|
| 292 |
+
)
|
| 293 |
+
distributed_args.add_argument(
|
| 294 |
+
"--main_process_port",
|
| 295 |
+
type=int,
|
| 296 |
+
default=None,
|
| 297 |
+
help="The port to use to communicate with the machine of rank 0.",
|
| 298 |
+
)
|
| 299 |
+
distributed_args.add_argument(
|
| 300 |
+
"-t",
|
| 301 |
+
"--tee",
|
| 302 |
+
default="0",
|
| 303 |
+
+        type=str,
+        help="Tee std streams into a log file and also to console.",
+    )
+    distributed_args.add_argument(
+        "--role",
+        type=str,
+        default="default",
+        help="User-defined role for the workers.",
+    )
+    # Rendezvous related arguments
+    distributed_args.add_argument(
+        "--rdzv_backend",
+        type=str,
+        default="static",
+        help="The rendezvous method to use, such as 'static' (the default) or 'c10d'",
+    )
+    distributed_args.add_argument(
+        "--rdzv_conf",
+        type=str,
+        default="",
+        help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).",
+    )
+    distributed_args.add_argument(
+        "--max_restarts",
+        type=int,
+        default=0,
+        help="Maximum number of worker group restarts before failing.",
+    )
+    distributed_args.add_argument(
+        "--monitor_interval",
+        type=float,
+        default=5,
+        help="Interval, in seconds, to monitor the state of workers.",
+    )
+    parser.add_argument(
+        "-m",
+        "--module",
+        action="store_true",
+        help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
+    )
+    parser.add_argument(
+        "--no_python",
+        action="store_true",
+        help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
+    )
+
+    # TPU arguments
+    tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
+    tpu_args.add_argument(
+        "--tpu_cluster",
+        action="store_true",
+        dest="tpu_use_cluster",
+        help="Whether to use a GCP TPU pod for training.",
+    )
+    tpu_args.add_argument(
+        "--no_tpu_cluster",
+        action="store_false",
+        dest="tpu_use_cluster",
+        help="Should not be passed explicitly, this is for internal use only.",
+    )
+    tpu_args.add_argument(
+        "--tpu_use_sudo",
+        action="store_true",
+        help="Whether to use `sudo` when running the TPU training script in each pod.",
+    )
+    tpu_args.add_argument(
+        "--vm",
+        type=str,
+        action="append",
+        help=(
+            "List of single Compute VM instance names. "
+            "If not provided we assume usage of instance groups. For TPU pods."
+        ),
+    )
+    tpu_args.add_argument(
+        "--env",
+        type=str,
+        action="append",
+        help="List of environment variables to set on the Compute VM instances. For TPU pods.",
+    )
+    tpu_args.add_argument(
+        "--main_training_function",
+        type=str,
+        default=None,
+        help="The name of the main function to be executed in your script (only for TPU training).",
+    )
+    tpu_args.add_argument(
+        "--downcast_bf16",
+        action="store_true",
+        help="When using bf16 precision on TPUs, whether both float and double tensors are cast to bfloat16, or double tensors remain as float32.",
+    )
+
+    # DeepSpeed arguments
+    deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
+    deepspeed_args.add_argument(
+        "--deepspeed_config_file",
+        default=None,
+        type=str,
+        help="DeepSpeed config file.",
+    )
+    deepspeed_args.add_argument(
+        "--zero_stage",
+        default=None,
+        type=int,
+        help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to `2`.",
+    )
+    deepspeed_args.add_argument(
+        "--offload_optimizer_device",
+        default=None,
+        type=str,
+        help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to 'none'.",
+    )
+    deepspeed_args.add_argument(
+        "--offload_param_device",
+        default=None,
+        type=str,
+        help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to 'none'.",
+    )
+    deepspeed_args.add_argument(
+        "--offload_optimizer_nvme_path",
+        default=None,
+        type=str,
+        help="Decides the NVMe path to offload optimizer states to (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to 'none'.",
+    )
+    deepspeed_args.add_argument(
+        "--offload_param_nvme_path",
+        default=None,
+        type=str,
+        help="Decides the NVMe path to offload parameters to (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to 'none'.",
+    )
+    deepspeed_args.add_argument(
+        "--gradient_accumulation_steps",
+        default=None,
+        type=int,
+        help="Number of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to `1`.",
+    )
+    deepspeed_args.add_argument(
+        "--gradient_clipping",
+        default=None,
+        type=float,
+        help="Gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to `1.0`.",
+    )
+    deepspeed_args.add_argument(
+        "--zero3_init_flag",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
+        "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
+    )
+    deepspeed_args.add_argument(
+        "--zero3_save_16bit_model",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
+        "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_hostfile",
+        default=None,
+        type=str,
+        help="DeepSpeed hostfile for configuring multi-node compute resources.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_exclusion_filter",
+        default=None,
+        type=str,
+        help="DeepSpeed exclusion filter string when using multi-node setup.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_inclusion_filter",
+        default=None,
+        type=str,
+        help="DeepSpeed inclusion filter string when using multi-node setup.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_multinode_launcher",
+        default=None,
+        type=str,
+        help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
+    )
+
+    # fsdp arguments
+    fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
+    fsdp_args.add_argument(
+        "--fsdp_offload_params",
+        default="false",
+        type=str,
+        help="Decides whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_min_num_params",
+        type=int,
+        default=1e8,
+        help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_sharding_strategy",
+        type=str,
+        default="FULL_SHARD",
+        help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_auto_wrap_policy",
+        type=str,
+        default=None,
+        help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_transformer_layer_cls_to_wrap",
+        default=None,
+        type=str,
+        help="Transformer layer class name (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block` .... "
+        "(useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_backward_prefetch_policy",
+        default=None,
+        type=str,
+        help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use `fsdp_backward_prefetch` instead.",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_backward_prefetch",
+        default=None,
+        type=str,
+        help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_state_dict_type",
+        default=None,
+        type=str,
+        help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_forward_prefetch",
+        default="false",
+        type=str,
+        help="If True, then FSDP explicitly prefetches the next upcoming "
+        "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_use_orig_params",
+        default="true",
+        type=str,
+        help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters."
+        " (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_cpu_ram_efficient_loading",
+        default="true",
+        type=str,
+        help="If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. "
+        "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to be True. "
+        "(useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_sync_module_states",
+        default="true",
+        type=str,
+        help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
+        " (useful only when `use_fsdp` flag is passed).",
+    )
+
+    # megatron_lm args
+    megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
+    megatron_lm_args.add_argument(
+        "--megatron_lm_tp_degree",
+        type=int,
+        default=1,
+        help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_pp_degree",
+        type=int,
+        default=1,
+        help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_num_micro_batches",
+        type=int,
+        default=None,
+        help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_sequence_parallelism",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
+        "(useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_recompute_activations",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to enable Selective Activation Recomputation. "
+        "(useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_use_distributed_optimizer",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to use a distributed optimizer "
+        "which shards optimizer state and gradients across Data Parallel (DP) ranks. "
+        "(useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_gradient_clipping",
+        default=1.0,
+        type=float,
+        help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
+        "(useful only when `use_megatron_lm` flag is passed).",
+    )
+
+    # AWS arguments
+    aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
+    aws_args.add_argument(
+        "--aws_access_key_id",
+        type=str,
+        default=None,
+        help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job.",
+    )
+    aws_args.add_argument(
+        "--aws_secret_access_key",
+        type=str,
+        default=None,
+        help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.",
+    )
+    parser.add_argument(
+        "--debug",
+        action="store_true",
+        help="Whether to print out the torch.distributed stack trace when something fails.",
+    )
+    parser.add_argument(
+        "training_script",
+        type=str,
+        help=(
+            "The full path to the script to be launched in parallel, followed by all the arguments for the training "
+            "script."
+        ),
+    )
+
+    # MPI arguments
+    mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
+    mpirun_args.add_argument(
+        "--mpirun_hostfile",
+        type=str,
+        default=None,
+        help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
+        "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
+    )
+    mpirun_args.add_argument(
+        "--mpirun_ccl",
+        type=int,
+        default=1,
+        help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
+    )
+
+    # Other arguments of the training scripts
+    parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
+
+    if subparsers is not None:
+        parser.set_defaults(func=launch_command)
+    return parser
+
+
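A quick sanity check of the parser assembled above — a minimal sketch, assuming `accelerate` is installed so `launch_command_parser` resolves; `train.py` and `--epochs` are hypothetical placeholders:

# Build the launch parser and parse a hypothetical command line.
from accelerate.commands.launch import launch_command_parser

parser = launch_command_parser()
args = parser.parse_args(["--num_processes", "2", "train.py", "--epochs", "3"])
print(args.num_processes)         # 2
print(args.training_script)       # "train.py"
print(args.training_script_args)  # ["--epochs", "3"], captured via argparse.REMAINDER
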
+def simple_launcher(args):
+    cmd, current_env = prepare_simple_launcher_cmd_env(args)
+
+    process = subprocess.Popen(cmd, env=current_env)
+    process.wait()
+    if process.returncode != 0:
+        if not args.quiet:
+            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+        else:
+            sys.exit(1)
+
+
+def multi_gpu_launcher(args):
+    import torch.distributed.run as distrib_run
+
+    current_env = prepare_multi_gpu_env(args)
+    if not check_cuda_p2p_ib_support():
+        message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+        warn = False
+        if "NCCL_P2P_DISABLE" not in current_env:
+            current_env["NCCL_P2P_DISABLE"] = "1"
+            warn = True
+        if "NCCL_IB_DISABLE" not in current_env:
+            current_env["NCCL_IB_DISABLE"] = "1"
+            warn = True
+        if warn:
+            logger.warning(message)
+
+    debug = getattr(args, "debug", False)
+    args = _filter_args(
+        args,
+        distrib_run.get_args_parser(),
+        ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
+    )
+
+    with patch_environment(**current_env):
+        try:
+            distrib_run.run(args)
+        except Exception:
+            if is_rich_available() and debug:
+                console = get_console()
+                console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
+                console.print_exception(suppress=[__file__], show_locals=False)
+            else:
+                raise
+
+
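The `patch_environment` context manager used above is what keeps the NCCL switches scoped to the launched run; a minimal sketch of that behavior, using the real `accelerate.utils` helper:

# Environment variables set inside the context are restored on exit,
# even if the body raises -- the same pattern multi_gpu_launcher relies on.
import os

from accelerate.utils import patch_environment

with patch_environment(NCCL_P2P_DISABLE="1"):
    print(os.environ["NCCL_P2P_DISABLE"])  # "1" inside the context
print("NCCL_P2P_DISABLE" in os.environ)    # False again outside, if it was unset before
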
+def deepspeed_launcher(args):
+    import torch.distributed.run as distrib_run
+
+    if not is_deepspeed_available():
+        raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
+    else:
+        from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
+
+    cmd, current_env = prepare_deepspeed_cmd_env(args)
+    if not check_cuda_p2p_ib_support():
+        message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+        warn = False
+        if "NCCL_P2P_DISABLE" not in current_env:
+            current_env["NCCL_P2P_DISABLE"] = "1"
+            warn = True
+        if "NCCL_IB_DISABLE" not in current_env:
+            current_env["NCCL_IB_DISABLE"] = "1"
+            warn = True
+        if warn:
+            logger.warning(message)
+
+    if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+        with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
+            valid_env_items = convert_dict_to_env_variables(current_env)
+            if len(valid_env_items) > 1:
+                f.writelines(valid_env_items)
+
+        process = subprocess.Popen(cmd, env=current_env)
+        process.wait()
+        if process.returncode != 0:
+            if not args.quiet:
+                raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+            else:
+                sys.exit(1)
+    else:
+        debug = getattr(args, "debug", False)
+        args = _filter_args(
+            args,
+            distrib_run.get_args_parser(),
+            ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
+        )
+        with patch_environment(**current_env):
+            try:
+                distrib_run.run(args)
+            except Exception:
+                if is_rich_available() and debug:
+                    console = get_console()
+                    console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
+                    console.print_exception(suppress=[__file__], show_locals=False)
+                else:
+                    raise
+
+
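For multi-node runs, the branch above persists the launcher's environment into DeepSpeed's env file (typically `.deepspeed_env`) so worker nodes inherit it; a minimal sketch of that step using the same `accelerate.utils` helper (the dict contents here are illustrative):

# Turn an environment dict into KEY=value lines suitable for f.writelines(...),
# as deepspeed_launcher does before spawning multi-node workers.
from accelerate.utils import convert_dict_to_env_variables

env = {"ACCELERATE_MIXED_PRECISION": "bf16", "OMP_NUM_THREADS": "1"}
with open(".deepspeed_env", "a") as f:
    f.writelines(convert_dict_to_env_variables(env))
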
+def tpu_launcher(args):
+    import torch_xla.distributed.xla_multiprocessing as xmp
+
+    if args.no_python:
+        raise ValueError("--no_python cannot be used with TPU launcher")
+
+    args, current_env = prepare_tpu(args, {})
+
+    if args.module:
+        mod_name = args.training_script
+    else:
+        # Import training_script as a module
+        script_path = Path(args.training_script)
+        sys.path.append(str(script_path.parent.resolve()))
+        mod_name = script_path.stem
+
+    mod = importlib.import_module(mod_name)
+    if not hasattr(mod, args.main_training_function):
+        raise ValueError(
+            f"Your training script should have a function named {args.main_training_function}, or you should pass a "
+            "different value to `--main_training_function`."
+        )
+
+    # Patch sys.argv
+    sys.argv = [mod.__file__] + args.training_script_args
+
+    main_function = getattr(mod, args.main_training_function)
+    with patch_environment(**current_env):
+        xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
+
+
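`tpu_launcher` imports the user script as a module and spawns the function named by `--main_training_function` in every TPU process, so a script launched this way just needs to expose such a top-level function — a minimal sketch (`train.py` and `main` are placeholder names):

# train.py -- shape of a script usable with tpu_launcher above.
# PrepareForLaunch calls the function with no arguments in each process.
def main():
    print("hello from one TPU process")


if __name__ == "__main__":
    main()
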
+def tpu_pod_launcher(args):
+    from torch_xla.distributed import xla_dist
+
+    current_env = {}
+    args, current_env = prepare_tpu(args, current_env, True)
+    debug = getattr(args, "debug", False)
+
+    training_script = args.training_script
+    training_script_args = args.training_script_args
+    new_args = _filter_args(
+        args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
+    )
+
+    if args.tpu_use_sudo:
+        new_cmd = ["sudo"]
+    else:
+        new_cmd = []
+
+    new_cmd += [
+        "accelerate-launch",
+        "--tpu",
+        "--no_tpu_cluster",
+        "--num_machines",
+        "1",
+        "--mixed_precision",
+        "no",
+        "--dynamo_backend",
+        "no",
+        "--num_processes",
+        str(args.num_processes),
+        "--main_training_function",
+        str(args.main_training_function),
+        training_script,
+    ] + training_script_args
+
+    new_args.positional = new_cmd
+    bad_flags = ""
+    for arg in vars(new_args):
+        if arg.startswith("docker_"):
+            value = getattr(new_args, arg)
+            if value != "" and value is not None:
+                bad_flags += f'{arg}="{value}"\n'
+    if bad_flags != "":
+        raise ValueError(
+            f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
+        )
+    new_args.env = [f"{k}={v}" for k, v in current_env.items()]
+    new_args.env.append("ACCELERATE_IN_TPU_POD=1")
+    try:
+        xla_dist.resolve_and_execute(new_args)
+    except Exception:
+        if is_rich_available() and debug:
+            console = get_console()
+            console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
+            console.print_exception(suppress=[__file__], show_locals=False)
+        else:
+            raise
+
+
+def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
+    if not is_sagemaker_available():
+        raise ImportError(
+            "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
+        )
+    if args.module or args.no_python:
+        raise ValueError(
+            "SageMaker requires a python training script file and cannot be used with --module or --no_python"
+        )
+
+    from sagemaker.huggingface import HuggingFace
+
+    args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
+
+    huggingface_estimator = HuggingFace(**args)
+
+    huggingface_estimator.fit(inputs=sagemaker_inputs)
+    print(f"You can find your model data at: {huggingface_estimator.model_data}")
+
+
+def _validate_launch_command(args):
+    # Sanity checks
+    if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
+        raise ValueError(
+            "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
+        )
+    if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
+        raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
+
+    defaults = None
+    warned = []
+    mp_from_config_flag = False
+    # Get the default from the config file.
+    if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
+        defaults = load_config_from_file(args.config_file)
+        if (
+            not args.multi_gpu
+            and not args.tpu
+            and not args.tpu_use_cluster
+            and not args.use_deepspeed
+            and not args.use_fsdp
+            and not args.use_megatron_lm
+        ):
+            args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
+            args.multi_gpu = (
+                True
+                if defaults.distributed_type
+                in (
+                    DistributedType.MULTI_GPU,
+                    DistributedType.MULTI_NPU,
+                    DistributedType.MULTI_MLU,
+                    DistributedType.MULTI_XPU,
+                )
+                else False
+            )
+            args.tpu = defaults.distributed_type == DistributedType.XLA
+            args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
+            args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
+            args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
+        if args.gpu_ids is None:
+            if defaults.gpu_ids is not None:
+                args.gpu_ids = defaults.gpu_ids
+            else:
+                args.gpu_ids = "all"
+
+        if args.multi_gpu and args.num_machines is None:
+            args.num_machines = defaults.num_machines
+
+        if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
+            raise ValueError(
+                "Less than two GPU ids were configured and tried to run on multiple GPUs. "
+                "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
+            )
+        if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
+            # Update args with the defaults
+            for name, attr in defaults.__dict__.items():
+                if isinstance(attr, dict):
+                    for k in defaults.deepspeed_config:
+                        setattr(args, k, defaults.deepspeed_config[k])
+                    for k in defaults.fsdp_config:
+                        arg_to_set = k
+                        if "fsdp" not in arg_to_set:
+                            arg_to_set = "fsdp_" + arg_to_set
+                        setattr(args, arg_to_set, defaults.fsdp_config[k])
+                    for k in defaults.megatron_lm_config:
+                        setattr(args, k, defaults.megatron_lm_config[k])
+                    for k in defaults.dynamo_config:
+                        setattr(args, k, defaults.dynamo_config[k])
+                    for k in defaults.ipex_config:
+                        setattr(args, k, defaults.ipex_config[k])
+                    for k in defaults.mpirun_config:
+                        setattr(args, k, defaults.mpirun_config[k])
+                    continue
+
+                # Those args are handled separately
+                if (
+                    name not in ["compute_environment", "mixed_precision", "distributed_type"]
+                    and getattr(args, name, None) is None
+                ):
+                    setattr(args, name, attr)
+        if not args.debug:
+            args.debug = defaults.debug
+
+        if not args.mixed_precision:
+            if defaults.mixed_precision is None:
+                args.mixed_precision = "no"
+            else:
+                args.mixed_precision = defaults.mixed_precision
+                mp_from_config_flag = True
+        else:
+            if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
+                native_amp = is_torch_version(">=", "1.10")
+            else:
+                native_amp = is_bf16_available(True)
+            if (
+                args.mixed_precision == "bf16"
+                and not native_amp
+                and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
+            ):
+                raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
+
+        # Silently set the default here
+        if args.dynamo_backend is None:
+            args.dynamo_backend = "no"
+    else:
+        if args.num_processes is None:
+            if args.use_xpu and is_xpu_available():
+                args.num_processes = torch.xpu.device_count()
+            elif is_mlu_available():
+                args.num_processes = torch.mlu.device_count()
+            elif is_npu_available():
+                args.num_processes = torch.npu.device_count()
+            else:
+                args.num_processes = torch.cuda.device_count()
+            warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
+        if args.debug is None:
+            args.debug = False
+        if not args.multi_gpu and (
+            (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
+            or (is_mlu_available() and torch.mlu.device_count() > 1)
+            or (is_npu_available() and torch.npu.device_count() > 1)
+            or (torch.cuda.device_count() > 1)
+        ):
+            warned.append(
+                "\t\tMore than one GPU was found, enabling multi-GPU training.\n"
+                "\t\tIf this was unintended please pass in `--num_processes=1`."
+            )
+            args.multi_gpu = True
+        if args.num_machines is None:
+            warned.append("\t`--num_machines` was set to a value of `1`")
+            args.num_machines = 1
+        if args.mixed_precision is None:
+            warned.append("\t`--mixed_precision` was set to a value of `'no'`")
+            args.mixed_precision = "no"
+        if not hasattr(args, "use_cpu"):
+            args.use_cpu = args.cpu
+        if args.dynamo_backend is None:
+            warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
+            args.dynamo_backend = "no"
+    if args.debug:
+        logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")
+
+    is_aws_env_disabled = defaults is None or (
+        defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
+    )
+    if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
+        args.num_cpu_threads_per_process = 1
+        if args.use_cpu and args.num_processes >= 1:
+            local_size = get_int_from_env(
+                ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
+            )
+            threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
+            if threads_per_process > 1:
+                args.num_cpu_threads_per_process = threads_per_process
+                warned.append(
+                    f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
+                )
+
+    if any(warned):
+        message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
+        message += "\n".join(warned)
+        message += (
+            "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
+        )
+        logger.warning(message)
+    return args, defaults, mp_from_config_flag
+
+
+def launch_command(args):
+    args, defaults, mp_from_config_flag = _validate_launch_command(args)
+    # Use the proper launcher
+    if args.use_deepspeed and not args.cpu:
+        args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []
+        if mp_from_config_flag:
+            args.deepspeed_fields_from_accelerate_config.append("mixed_precision")
+        args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config)
+        deepspeed_launcher(args)
+    elif args.use_fsdp and not args.cpu:
+        multi_gpu_launcher(args)
+    elif args.use_megatron_lm and not args.cpu:
+        multi_gpu_launcher(args)
+    elif args.multi_gpu and not args.cpu:
+        multi_gpu_launcher(args)
+    elif args.tpu and not args.cpu:
+        if args.tpu_use_cluster:
+            tpu_pod_launcher(args)
+        else:
+            tpu_launcher(args)
+    elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+        sagemaker_launcher(defaults, args)
+    else:
+        simple_launcher(args)
+
+
+def main():
+    parser = launch_command_parser()
+    args = parser.parse_args()
+    launch_command(args)
+
+
+if __name__ == "__main__":
+    main()
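Everything above backs the `accelerate launch` CLI; for spawning a function across processes without going through argument parsing (e.g. from a notebook), the library also exposes `notebook_launcher` — a minimal sketch using that real accelerate API:

from accelerate import Accelerator, notebook_launcher


def training_function():
    # Each spawned process builds its own Accelerator and reports its rank.
    accelerator = Accelerator()
    accelerator.print(f"process {accelerator.process_index} of {accelerator.num_processes}")


if __name__ == "__main__":
    notebook_launcher(training_function, args=(), num_processes=2)
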
venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py
ADDED
@@ -0,0 +1,14 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .selection_menu import BulletMenu
venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (241 Bytes)
venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc
ADDED
Binary file (1.43 kB)
venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc
ADDED
Binary file (1.65 kB)
venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc
ADDED
Binary file (2.38 kB)
venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc
ADDED
Binary file (2.4 kB)
venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc
ADDED
Binary file (4.44 kB)
venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py
ADDED
@@ -0,0 +1,65 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
+"""
+
+import os
+import sys
+from contextlib import contextmanager
+
+
+# Windows only
+if os.name == "nt":
+    import ctypes
+    import msvcrt  # noqa
+
+    class CursorInfo(ctypes.Structure):
+        # _fields is a specific attr expected by ctypes
+        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
+
+
+def hide_cursor():
+    if os.name == "nt":
+        ci = CursorInfo()
+        handle = ctypes.windll.kernel32.GetStdHandle(-11)
+        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+        ci.visible = False
+        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+    elif os.name == "posix":
+        sys.stdout.write("\033[?25l")
+        sys.stdout.flush()
+
+
+def show_cursor():
+    if os.name == "nt":
+        ci = CursorInfo()
+        handle = ctypes.windll.kernel32.GetStdHandle(-11)
+        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+        ci.visible = True
+        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+    elif os.name == "posix":
+        sys.stdout.write("\033[?25h")
+        sys.stdout.flush()
+
+
+@contextmanager
+def hide():
+    "Context manager to hide the terminal cursor"
+    try:
+        hide_cursor()
+        yield
+    finally:
+        show_cursor()
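A minimal use of the cursor helpers above — the `hide()` context manager guarantees the cursor is restored even if the body raises:

import time

from accelerate.commands.menu.cursor import hide

with hide():
    time.sleep(1)  # render menu frames here while the cursor is hidden
# cursor is visible again from this point on
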
venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py
ADDED
@@ -0,0 +1,59 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A variety of helper functions and constants when dealing with terminal menu choices, based on
+https://github.com/bchao1/bullet
+"""
+
+import enum
+import shutil
+import sys
+
+
+TERMINAL_WIDTH, _ = shutil.get_terminal_size()
+
+CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
+
+
+class Direction(enum.Enum):
+    UP = 0
+    DOWN = 1
+
+
+def forceWrite(content, end=""):
+    sys.stdout.write(str(content) + end)
+    sys.stdout.flush()
+
+
+def writeColor(content, color, end=""):
+    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
+
+
+def reset_cursor():
+    forceWrite("\r")
+
+
+def move_cursor(num_lines: int, direction: str):
+    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
+
+
+def clear_line():
+    forceWrite(" " * TERMINAL_WIDTH)
+    reset_cursor()
+
+
+def linebreak():
+    reset_cursor()
+    forceWrite("-" * TERMINAL_WIDTH)
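A small sketch driving the terminal helpers above (ANSI escapes, so best run in a real terminal): print a green line, move back up onto it, and overwrite it:

from accelerate.commands.menu.helpers import forceWrite, move_cursor, reset_cursor, writeColor

writeColor("selected choice", 32, end="\n")  # 32 is the ANSI code for green
move_cursor(1, "UP")                         # cursor back onto the printed line
reset_cursor()                               # carriage return to column 0
forceWrite("overwritten!   ", end="\n")
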
venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py
ADDED
@@ -0,0 +1,86 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This file contains utilities for handling input from the user and registering specific keys to specific functions,
+based on https://github.com/bchao1/bullet
+"""
+
+from typing import List
+
+from .keymap import KEYMAP, get_character
+
+
+def mark(key: str):
+    """
+    Mark the function with the key code so it can be handled in the register
+    """
+
+    def decorator(func):
+        handle = getattr(func, "handle_key", [])
+        handle += [key]
+        func.handle_key = handle
+        return func
+
+    return decorator
+
+
+def mark_multiple(*keys: List[str]):
+    """
+    Mark the function with the key codes so it can be handled in the register
+    """
+
+    def decorator(func):
+        handle = getattr(func, "handle_key", [])
+        handle += keys
+        func.handle_key = handle
+        return func
+
+    return decorator
+
+
+class KeyHandler(type):
+    """
+    Metaclass that adds the key handlers to the class
+    """
+
+    def __new__(cls, name, bases, attrs):
+        new_cls = super().__new__(cls, name, bases, attrs)
+        if not hasattr(new_cls, "key_handler"):
+            new_cls.key_handler = {}
+        new_cls.handle_input = KeyHandler.handle_input
+
+        for value in attrs.values():
+            handled_keys = getattr(value, "handle_key", [])
+            for key in handled_keys:
+                new_cls.key_handler[key] = value
+        return new_cls
+
+    @staticmethod
+    def handle_input(cls):
+        "Finds and returns the selected character if it exists in the handler"
+        char = get_character()
+        if char != KEYMAP["undefined"]:
+            char = ord(char)
+        handler = cls.key_handler.get(char)
+        if handler:
+            cls.current_selection = char
+            return handler(cls)
+        else:
+            return None
+
+
+def register(cls):
+    """Adds KeyHandler metaclass to the class"""
+    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
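How the pieces above fit together — a minimal sketch of a key-handling class built with `register`/`mark`, mirroring how `BulletMenu` uses them (the class name `EnterOnly` is hypothetical); `handle_input()` blocks for one keypress and dispatches to the bound method:

from accelerate.commands.menu import input
from accelerate.commands.menu.keymap import KEYMAP


@input.register
class EnterOnly:
    @input.mark(KEYMAP["newline"])
    def on_enter(self):
        return "enter pressed"


menu = EnterOnly()
print(menu.handle_input())  # "enter pressed" on Enter, None for unbound keys
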
venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py
ADDED
@@ -0,0 +1,133 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
+"""
+
+import os
+import string
+import sys
+
+
+ARROW_KEY_FLAG = 1 << 8
+
+KEYMAP = {
+    "tab": ord("\t"),
+    "newline": ord("\r"),
+    "esc": 27,
+    "up": 65 + ARROW_KEY_FLAG,
+    "down": 66 + ARROW_KEY_FLAG,
+    "right": 67 + ARROW_KEY_FLAG,
+    "left": 68 + ARROW_KEY_FLAG,
+    "mod_int": 91,
+    "undefined": sys.maxsize,
+    "interrupt": 3,
+    "insert": 50,
+    "delete": 51,
+    "pg_up": 53,
+    "pg_down": 54,
+}
+
+KEYMAP["arrow_begin"] = KEYMAP["up"]
+KEYMAP["arrow_end"] = KEYMAP["left"]
+
+if sys.platform == "win32":
+    WIN_CH_BUFFER = []
+    WIN_KEYMAP = {
+        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
+        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
+        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
+        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
+        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
+        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
+        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
+        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
+    }
+
+for i in range(10):
+    KEYMAP[str(i)] = ord(str(i))
+
+
+def get_raw_chars():
+    "Gets raw characters from inputs"
+    if os.name == "nt":
+        import msvcrt
+
+        encoding = "mbcs"
+        # Flush the keyboard buffer
+        while msvcrt.kbhit():
+            msvcrt.getch()
+        if len(WIN_CH_BUFFER) == 0:
+            # Read the keystroke
+            ch = msvcrt.getch()
+
+            # If it is a prefix char, get second part
+            if ch in (b"\x00", b"\xe0"):
+                ch2 = ch + msvcrt.getch()
+                # Translate actual Win chars to bullet char types
+                try:
+                    chx = chr(WIN_KEYMAP[ch2])
+                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
+                    WIN_CH_BUFFER.append(chx)
+                    if ord(chx) in (
+                        KEYMAP["insert"] - 1 << 9,
+                        KEYMAP["delete"] - 1 << 9,
+                        KEYMAP["pg_up"] - 1 << 9,
+                        KEYMAP["pg_down"] - 1 << 9,
+                    ):
+                        WIN_CH_BUFFER.append(chr(126))
+                    ch = chr(KEYMAP["esc"])
+                except KeyError:
+                    ch = ch2[1]
+            else:
+                ch = ch.decode(encoding)
+        else:
+            ch = WIN_CH_BUFFER.pop(0)
+    elif os.name == "posix":
+        import termios
+        import tty
+
+        fd = sys.stdin.fileno()
+        old_settings = termios.tcgetattr(fd)
+        try:
+            tty.setraw(fd)
+            ch = sys.stdin.read(1)
+        finally:
+            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+    return ch
+
+
+def get_character():
+    "Gets a character from the keyboard and returns the key code"
+    char = get_raw_chars()
+    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
+        return char
+
+    elif ord(char) == KEYMAP["esc"]:
+        combo = get_raw_chars()
+        if ord(combo) == KEYMAP["mod_int"]:
+            key = get_raw_chars()
+            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
+                return chr(ord(key) + ARROW_KEY_FLAG)
+            else:
+                return KEYMAP["undefined"]
+        else:
+            return get_raw_chars()
+
+    else:
+        if char in string.printable:
+            return char
+        else:
+            return KEYMAP["undefined"]
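Reading the keymap directly — a minimal sketch that blocks for one keypress and classifies it using the table above (note arrows come back as a character offset by ARROW_KEY_FLAG, while undefined keys come back as the sentinel int):

from accelerate.commands.menu.keymap import KEYMAP, get_character

key = get_character()  # raw terminal read; blocks until a key is pressed
if key == KEYMAP["undefined"]:
    print("unhandled key")
elif ord(key) == KEYMAP["up"]:
    print("arrow up")
else:
    print(f"printable key: {key!r}")
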
venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py
ADDED
@@ -0,0 +1,144 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Main driver for the selection menu, based on https://github.com/bchao1/bullet
+"""
+
+import builtins
+import sys
+
+from ...utils.imports import _is_package_available
+from . import cursor, input
+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
+from .keymap import KEYMAP
+
+
+in_colab = False
+try:
+    in_colab = _is_package_available("google.colab")
+except ModuleNotFoundError:
+    pass
+
+
+@input.register
+class BulletMenu:
+    """
+    A CLI menu to select a choice from a list of choices using the keyboard.
+    """
+
+    def __init__(self, prompt: str = None, choices: list = []):
+        self.position = 0
+        self.choices = choices
+        self.prompt = prompt
+        if sys.platform == "win32":
+            self.arrow_char = "*"
+        else:
+            self.arrow_char = "➔ "
+
+    def write_choice(self, index, end: str = ""):
+        if sys.platform != "win32":
+            writeColor(self.choices[index], 32, end)
+        else:
+            forceWrite(self.choices[index], end)
+
+    def print_choice(self, index: int):
+        "Prints the choice at the given index"
+        if index == self.position:
+            forceWrite(f" {self.arrow_char} ")
+            self.write_choice(index)
+        else:
+            forceWrite(f"    {self.choices[index]}")
+        reset_cursor()
+
+    def move_direction(self, direction: Direction, num_spaces: int = 1):
+        "Should not be directly called, used to move a direction of either up or down"
+        old_position = self.position
+        if direction == Direction.DOWN:
+            if self.position + 1 >= len(self.choices):
+                return
+            self.position += num_spaces
+        else:
+            if self.position - 1 < 0:
+                return
+            self.position -= num_spaces
+        clear_line()
+        self.print_choice(old_position)
+        move_cursor(num_spaces, direction.name)
+        self.print_choice(self.position)
+
+    @input.mark(KEYMAP["up"])
+    def move_up(self):
+        self.move_direction(Direction.UP)
+
+    @input.mark(KEYMAP["down"])
+    def move_down(self):
+        self.move_direction(Direction.DOWN)
+
+    @input.mark(KEYMAP["newline"])
+    def select(self):
+        move_cursor(len(self.choices) - self.position, "DOWN")
+        return self.position
+
+    @input.mark(KEYMAP["interrupt"])
+    def interrupt(self):
+        move_cursor(len(self.choices) - self.position, "DOWN")
+        raise KeyboardInterrupt
+
+    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
+    def select_row(self):
+        index = int(chr(self.current_selection))
+        movement = index - self.position
+        if index == self.position:
+            return
+        if index < len(self.choices):
+            if self.position > index:
+                self.move_direction(Direction.UP, -movement)
+            elif self.position < index:
+                self.move_direction(Direction.DOWN, movement)
+            else:
+                return
+        else:
+            return
+
+    def run(self, default_choice: int = 0):
+        "Start the menu and return the selected choice"
+        if self.prompt:
+            linebreak()
+            forceWrite(self.prompt, "\n")
+            if in_colab:
+                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
+            else:
+                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
+        self.position = default_choice
+        for i in range(len(self.choices)):
+            self.print_choice(i)
+            forceWrite("\n")
+        move_cursor(len(self.choices) - self.position, "UP")
+        with cursor.hide():
+            while True:
+                if in_colab:
+                    try:
+                        choice = int(builtins.input())
+                    except ValueError:
+                        choice = default_choice
+                else:
+                    choice = self.handle_input()
+                if choice is not None:
+                    reset_cursor()
+                    for _ in range(len(self.choices) + 1):
+                        move_cursor(1, "UP")
+                        clear_line()
+                    self.write_choice(choice, "\n")
+                    return choice
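Using the menu above interactively (run in a real terminal; the prompt and choices here are illustrative):

from accelerate.commands.menu import BulletMenu

menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
index = menu.run(default_choice=0)  # returns the index of the selected choice
print(f"You picked option {index}")
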
venv/lib/python3.10/site-packages/accelerate/commands/test.py
ADDED
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
+
+
+def test_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("test")
+    else:
+        parser = argparse.ArgumentParser("Accelerate test command")
+
+    parser.add_argument(
+        "--config_file",
+        default=None,
+        help=(
+            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+            "with 'huggingface'."
+        ),
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=test_command)
+    return parser
+
+
+def test_command(args):
+    script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
+
+    if args.config_file is None:
+        test_args = [script_name]
+    else:
+        test_args = f"--config_file={args.config_file} {script_name}".split()
+
+    cmd = ["accelerate-launch"] + test_args
+    result = execute_subprocess_async(cmd)
+    if result.returncode == 0:
+        print("Test is a success! You are ready for your distributed training!")
+
+
+def main():
+    parser = test_command_parser()
+    args = parser.parse_args()
+    test_command(args)
+
+
+if __name__ == "__main__":
+    main()
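The same test can be driven programmatically instead of via `accelerate test` — a minimal sketch (`my_config.yaml` is a hypothetical path):

from accelerate.commands.test import test_command, test_command_parser

parser = test_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
test_command(args)  # launches accelerate's bundled test_script.py via accelerate-launch
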
venv/lib/python3.10/site-packages/accelerate/commands/tpu.py
ADDED
|
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import subprocess
+
+from packaging.version import Version, parse
+
+from accelerate.commands.config.config_args import default_config_file, load_config_from_file
+
+
+_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
+
+
+def tpu_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("tpu-config", description=_description)
+    else:
+        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
+    # Core arguments
+    config_args = parser.add_argument_group(
+        "Config Arguments", "Arguments that can be configured through `accelerate config`."
+    )
+    config_args.add_argument(
+        "--config_file",
+        type=str,
+        default=None,
+        help="Path to the config file to use for accelerate.",
+    )
+    config_args.add_argument(
+        "--tpu_name",
+        default=None,
+        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
+    )
+    config_args.add_argument(
+        "--tpu_zone",
+        default=None,
+        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
+    )
+    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
+    pod_args.add_argument(
+        "--use_alpha",
+        action="store_true",
+        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
+    )
+    pod_args.add_argument(
+        "--command_file",
+        default=None,
+        help="The path to the file containing the commands to run on the pod on startup.",
+    )
+    pod_args.add_argument(
+        "--command",
+        action="append",
+        nargs="+",
+        help="A command to run on the pod. Can be passed multiple times.",
+    )
+    pod_args.add_argument(
+        "--install_accelerate",
+        action="store_true",
+        help="Whether to install accelerate on the pod. Defaults to False.",
+    )
+    pod_args.add_argument(
+        "--accelerate_version",
+        default="latest",
+        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
+    )
+    pod_args.add_argument(
+        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=tpu_command_launcher)
+    return parser
+
+
+def tpu_command_launcher(args):
+    defaults = None
+
+    # Get the default from the config file if it exists.
+    if args.config_file is not None or os.path.isfile(default_config_file):
+        defaults = load_config_from_file(args.config_file)
+        if not args.command_file and defaults.command_file is not None and not args.command:
+            args.command_file = defaults.command_file
+        if not args.command and defaults.commands is not None:
+            args.command = defaults.commands
+        if not args.tpu_name:
+            args.tpu_name = defaults.tpu_name
+        if not args.tpu_zone:
+            args.tpu_zone = defaults.tpu_zone
+    if args.accelerate_version == "dev":
+        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
+    elif args.accelerate_version == "latest":
+        args.accelerate_version = "accelerate -U"
+    elif isinstance(parse(args.accelerate_version), Version):
+        args.accelerate_version = f"accelerate=={args.accelerate_version}"
+
+    if not args.command_file and not args.command:
+        raise ValueError("You must specify either a command file or a command to run on the pod.")
+
+    if args.command_file:
+        with open(args.command_file) as f:
+            args.command = [f.read().splitlines()]
+
+    # To turn list of lists into list of strings
+    if isinstance(args.command[0], list):
+        args.command = [line for cmd in args.command for line in cmd]
+    # Default to the shared folder and install accelerate
+    new_cmd = ["cd /usr/share"]
+    if args.install_accelerate:
+        new_cmd += [f"pip install {args.accelerate_version}"]
+    new_cmd += args.command
+    args.command = "; ".join(new_cmd)
+
+    # Then send it to gcloud
+    # Eventually try to use google-api-core to do this instead of subprocess
+    cmd = ["gcloud"]
+    if args.use_alpha:
+        cmd += ["alpha"]
+    cmd += [
+        "compute",
+        "tpus",
+        "tpu-vm",
+        "ssh",
+        args.tpu_name,
+        "--zone",
+        args.tpu_zone,
+        "--command",
+        args.command,
+        "--worker",
+        "all",
+    ]
+    if args.debug:
+        print(f"Running {' '.join(cmd)}")
+        return
+    subprocess.run(cmd)
+    print("Successfully setup pod.")
+
+
+def main():
+    parser = tpu_command_parser()
+    args = parser.parse_args()
+
+    tpu_command_launcher(args)
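`tpu-config` assembles one semicolon-joined shell command and sends it to every TPU worker via `gcloud compute tpus tpu-vm ssh --worker all`. With `--debug` the gcloud invocation is printed instead of executed, which makes a dry run easy to sketch. The TPU name and zone below are hypothetical placeholders, and the sketch assumes no default accelerate config file overrides these values:

    from accelerate.commands.tpu import tpu_command_parser, tpu_command_launcher

    parser = tpu_command_parser()
    args = parser.parse_args([
        "--tpu_name", "my-tpu",          # hypothetical TPU name
        "--tpu_zone", "us-central1-a",   # hypothetical zone
        "--command", "echo hello",
        "--debug",                       # print the gcloud command, don't run it
    ])
    tpu_command_launcher(args)
    # Prints something like:
    # Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a
    #   --command cd /usr/share; echo hello --worker all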
venv/lib/python3.10/site-packages/accelerate/commands/utils.py
ADDED
@@ -0,0 +1,120 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+
+class _StoreAction(argparse.Action):
+    """
+    Custom action that allows for `-` or `_` to be passed in for an argument.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        new_option_strings = []
+        for option_string in self.option_strings:
+            new_option_strings.append(option_string)
+            if "_" in option_string[2:]:
+                # Add `-` version to the option string
+                new_option_strings.append(option_string.replace("_", "-"))
+        self.option_strings = new_option_strings
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, values)
+
+
+class _StoreConstAction(_StoreAction):
+    """
+    Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
+    """
+
+    def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
+        super().__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=0,
+            const=const,
+            default=default,
+            required=required,
+            help=help,
+        )
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, self.const)
+
+
+class _StoreTrueAction(_StoreConstAction):
+    """
+    Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
+    """
+
+    def __init__(
+        self,
+        option_strings,
+        dest,
+        default=None,
+        required=False,
+        help=None,
+    ):
+        super().__init__(
+            option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
+        )
+
+
+class CustomArgumentGroup(argparse._ArgumentGroup):
+    """
+    Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+    when applicable.
+    """
+
+    def _add_action(self, action):
+        args = vars(action)
+        if isinstance(action, argparse._StoreTrueAction):
+            action = _StoreTrueAction(
+                args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
+            )
+        elif isinstance(action, argparse._StoreConstAction):
+            action = _StoreConstAction(
+                args["option_strings"],
+                args["dest"],
+                args["const"],
+                args["default"],
+                args["required"],
+                args["help"],
+            )
+        elif isinstance(action, argparse._StoreAction):
+            action = _StoreAction(**args)
+        action = super()._add_action(action)
+        return action
+
+
+class CustomArgumentParser(argparse.ArgumentParser):
+    """
+    Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+    when applicable.
+    """
+
+    def add_argument(self, *args, **kwargs):
+        if "action" in kwargs:
+            # Translate action -> class
+            if kwargs["action"] == "store_true":
+                kwargs["action"] = _StoreTrueAction
+        else:
+            kwargs["action"] = _StoreAction
+        super().add_argument(*args, **kwargs)
+
+    def add_argument_group(self, *args, **kwargs):
+        group = CustomArgumentGroup(self, *args, **kwargs)
+        self._action_groups.append(group)
+        return group
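The `_StoreAction` subclass registers a `--with-dashes` alias for every `--with_underscores` option string at construction time, before argparse records the action's option strings, so either spelling parses to the same destination. A small usage sketch (the `--num_processes` option here is just an illustration):

    from accelerate.commands.utils import CustomArgumentParser

    parser = CustomArgumentParser()
    # With no explicit `action`, add_argument falls through to _StoreAction,
    # which also registers the dashed alias `--num-processes`.
    parser.add_argument("--num_processes", type=int, default=1)

    print(parser.parse_args(["--num_processes", "2"]).num_processes)  # 2
    print(parser.parse_args(["--num-processes", "4"]).num_processes)  # 4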
venv/lib/python3.10/site-packages/accelerate/utils/__init__.py
ADDED
@@ -0,0 +1,225 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .constants import (
+    MODEL_NAME,
+    OPTIMIZER_NAME,
+    RNG_STATE_NAME,
+    SAFE_MODEL_NAME,
+    SAFE_WEIGHTS_INDEX_NAME,
+    SAFE_WEIGHTS_NAME,
+    SAMPLER_NAME,
+    SCALER_NAME,
+    SCHEDULER_NAME,
+    TORCH_DISTRIBUTED_OPERATION_TYPES,
+    TORCH_LAUNCH_PARAMS,
+    WEIGHTS_INDEX_NAME,
+    WEIGHTS_NAME,
+)
+from .dataclasses import (
+    AutocastKwargs,
+    BnbQuantizationConfig,
+    ComputeEnvironment,
+    CustomDtype,
+    DataLoaderConfiguration,
+    DeepSpeedPlugin,
+    DistributedDataParallelKwargs,
+    DistributedType,
+    DynamoBackend,
+    FP8RecipeKwargs,
+    FullyShardedDataParallelPlugin,
+    GradientAccumulationPlugin,
+    GradScalerKwargs,
+    InitProcessGroupKwargs,
+    KwargsHandler,
+    LoggerType,
+    MegatronLMPlugin,
+    PrecisionType,
+    ProjectConfiguration,
+    RNGType,
+    SageMakerDistributedType,
+    TensorInformation,
+    TorchDynamoPlugin,
+)
+from .environment import (
+    are_libraries_initialized,
+    check_cuda_p2p_ib_support,
+    check_fp8_capability,
+    convert_dict_to_env_variables,
+    get_cpu_distributed_information,
+    get_gpu_info,
+    get_int_from_env,
+    parse_choice_from_env,
+    parse_flag_from_env,
+    set_numa_affinity,
+    str_to_bool,
+)
+from .imports import (
+    get_ccl_version,
+    is_4bit_bnb_available,
+    is_8bit_bnb_available,
+    is_aim_available,
+    is_bf16_available,
+    is_bnb_available,
+    is_boto3_available,
+    is_ccl_available,
+    is_clearml_available,
+    is_comet_ml_available,
+    is_cuda_available,
+    is_datasets_available,
+    is_deepspeed_available,
+    is_dvclive_available,
+    is_fp8_available,
+    is_ipex_available,
+    is_megatron_lm_available,
+    is_mlflow_available,
+    is_mlu_available,
+    is_mps_available,
+    is_msamp_available,
+    is_npu_available,
+    is_pandas_available,
+    is_peft_available,
+    is_pippy_available,
+    is_pynvml_available,
+    is_rich_available,
+    is_sagemaker_available,
+    is_tensorboard_available,
+    is_timm_available,
+    is_torch_xla_available,
+    is_transformer_engine_available,
+    is_transformers_available,
+    is_wandb_available,
+    is_xpu_available,
+)
+from .modeling import (
+    calculate_maximum_sizes,
+    check_device_map,
+    check_tied_parameters_in_config,
+    check_tied_parameters_on_same_device,
+    compute_module_sizes,
+    convert_file_size_to_int,
+    dtype_byte_size,
+    find_tied_parameters,
+    get_balanced_memory,
+    get_max_layer_size,
+    get_max_memory,
+    get_mixed_precision_context_manager,
+    id_tensor_storage,
+    infer_auto_device_map,
+    is_peft_model,
+    load_checkpoint_in_model,
+    load_offloaded_weights,
+    load_state_dict,
+    named_module_tensors,
+    retie_parameters,
+    set_module_tensor_to_device,
+    shard_checkpoint,
+)
+from .offload import (
+    OffloadedWeightsLoader,
+    PrefixedDataset,
+    extract_submodules_state_dict,
+    load_offloaded_weight,
+    offload_state_dict,
+    offload_weight,
+    save_offload_index,
+)
+from .operations import (
+    CannotPadNestedTensorWarning,
+    broadcast,
+    broadcast_object_list,
+    concatenate,
+    convert_outputs_to_fp32,
+    convert_to_fp32,
+    copy_tensor_to_devices,
+    find_batch_size,
+    find_device,
+    gather,
+    gather_object,
+    get_data_structure,
+    honor_type,
+    ignorant_find_batch_size,
+    initialize_tensors,
+    is_namedtuple,
+    is_tensor_information,
+    is_torch_tensor,
+    listify,
+    pad_across_processes,
+    pad_input_tensors,
+    recursively_apply,
+    reduce,
+    send_to_device,
+    slice_tensors,
+)
+from .versions import compare_versions, is_torch_version
+
+
+if is_deepspeed_available():
+    from .deepspeed import (
+        DeepSpeedEngineWrapper,
+        DeepSpeedOptimizerWrapper,
+        DeepSpeedSchedulerWrapper,
+        DummyOptim,
+        DummyScheduler,
+        HfDeepSpeedConfig,
+    )
+
+from .bnb import has_4bit_bnb_layers, load_and_quantize_model
+from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
+from .launch import (
+    PrepareForLaunch,
+    _filter_args,
+    prepare_deepspeed_cmd_env,
+    prepare_multi_gpu_env,
+    prepare_sagemager_args_inputs,
+    prepare_simple_launcher_cmd_env,
+    prepare_tpu,
+)
+from .megatron_lm import (
+    AbstractTrainStep,
+    BertTrainStep,
+    GPTTrainStep,
+    MegatronEngine,
+    MegatronLMDummyDataLoader,
+    MegatronLMDummyScheduler,
+    MegatronLMOptimizerWrapper,
+    MegatronLMSchedulerWrapper,
+    T5TrainStep,
+    avg_losses_across_data_parallel_group,
+    gather_across_data_parallel_groups,
+)
+from .megatron_lm import initialize as megatron_lm_initialize
+from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
+from .megatron_lm import prepare_model as megatron_lm_prepare_model
+from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
+from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
+from .memory import find_executable_batch_size, release_memory
+from .other import (
+    check_os_kernel,
+    clean_state_dict_for_safetensors,
+    clear_environment,
+    convert_bytes,
+    extract_model_from_parallel,
+    get_pretty_name,
+    is_port_in_use,
+    merge_dicts,
+    patch_environment,
+    recursive_getattr,
+    save,
+    wait_for_everyone,
+    write_basic_config,
+)
+from .random import set_seed, synchronize_rng_state, synchronize_rng_states
+from .torch_xla import install_xla
+from .tqdm import tqdm
+from .transformer_engine import convert_model, has_transformer_engine_layers
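This aggregator module makes every name re-exported above importable from `accelerate.utils` directly. For example, a trivially hedged sketch assuming torch is installed alongside accelerate:

    import torch
    from accelerate.utils import send_to_device, set_seed

    set_seed(42)  # seeds the Python, NumPy and torch RNGs together
    batch = {"x": torch.ones(2, 2)}
    batch = send_to_device(batch, "cpu")  # recursively moves nested tensors
    print(batch["x"].device)  # cpu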