diff --git a/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..f41118393072b836ec3a1a1ba8b391ccbb6da78c --- /dev/null +++ b/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:165572568fad0d9c431ae19b33aecc84646d487ae264873a81bb8c60b6dcaa17 +size 9372 diff --git a/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..4cd01d02dd0cd5feae0050e6e3610a99c37ef7db --- /dev/null +++ b/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03eefe570affbc919a85497868156d412b8ccfe08eecec7baf6894fb08e4cfa8 +size 9293 diff --git a/ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..c208582ec47f282e12a4249ef2ca868a24d84435 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6223ebd887bd55fb48afd407e325a99e6f87d6c113fdf96694f1b25519b053d8 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..016a26613fa4c5dfbf8704ab8da7e630645f93d3 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2994aa83bde7c30048029d27d3081d0959d85cd9039cade33ba72d03907e903b +size 33555533 diff --git a/ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..bbade8932ca2b0d32899527da76f19e1b32371f1 --- /dev/null +++ b/ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3599aefe77491a064284602307c5d243e861a55c13ffae66cdd5e21886ff0e66 +size 9387 diff --git a/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..2fc67d6ad60c2247a8b538a20eb5130e5c9f18bb --- /dev/null +++ b/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0168178f990f6d01a96d00892354cc058836dfa0a396ca2a0926b77d89481cd2 +size 9293 diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7a5c3e930f00697a76f0457e93aa12dc549c578 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da03ded68d4cfccc35b8a7c46ddb0a75719c9019 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..724994619a21388b275c054e42840aa244e643cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da06f47e297bb7f01b85e4512d5d98eec4000b26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2871be4abfe7234f361cf76a90b9dd958fb9680 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4e0db832c8a79a80ce28b3ccd70209d2e27b0a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..854c24ee0a6d0b1f80a371fc177abe84285a8aea Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..711dada0cc5c16729be252fea457d21118efe4f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa9f62901e7721f72f95f959168e6948546ea5f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31c593c3a34155eaaa5c1a9e08421e3cd1ddf522 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2b925fcfdb51ddd36e717867a8e1a81e609d64b Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ceaa99ac418a1a0b1e7e44c074cceb29ed9d977 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a11eb34891e0b85371c8dc1bfb1f6514f99d86ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa6e916fc8f5ee33905168b35184b86014753832 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94258300b4919448d0cc85217e11927d8bfebbfe Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__init__.py b/venv/lib/python3.10/site-packages/accelerate/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6624074dd1e13d1f1e98cd35406ce416a3cb226 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dce2016f8b820ee73ee94184091f6fdbdf1d9cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..488be6e936c5c5a73b2a7b78e358e6862e589f2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..320c1d12e2d38868864ddcd087973026ad368239 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9457f4a8f2551ccdd320c712df31abd57c09eb09 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..114dcb26c6fb09fd9836fa3bd2c63306bc164ec1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..636edb08629b43a7566b1247a2c4855078258171 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60bb7a8c54e9838cad0861bd2909f8a63ff3120f Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py 
b/venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..ea5a08abf51a83ca048524ea0b8758f9d52b7edc --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from accelerate.commands.config import get_config_parser +from accelerate.commands.env import env_command_parser +from accelerate.commands.estimate import estimate_command_parser +from accelerate.commands.launch import launch_command_parser +from accelerate.commands.test import test_command_parser +from accelerate.commands.tpu import tpu_command_parser +from accelerate.commands.utils import CustomArgumentParser + + +def main(): + parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False) + subparsers = parser.add_subparsers(help="accelerate command helpers") + + # Register commands + get_config_parser(subparsers=subparsers) + estimate_command_parser(subparsers=subparsers) + env_command_parser(subparsers=subparsers) + launch_command_parser(subparsers=subparsers) + tpu_command_parser(subparsers=subparsers) + test_command_parser(subparsers=subparsers) + + # Let's go + args = parser.parse_args() + + if not hasattr(args, "func"): + parser.print_help() + exit(1) + + # Run + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/env.py b/venv/lib/python3.10/site-packages/accelerate/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..fb2f60f787a9eba3f75b6ac9171aefd0ffc61647 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/env.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import os +import platform +import subprocess + +import numpy as np +import psutil +import torch + +from accelerate import __version__ as version +from accelerate.commands.config import default_config_file, load_config_from_file + +from ..utils import is_mlu_available, is_npu_available, is_xpu_available + + +def env_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("env") + else: + parser = argparse.ArgumentParser("Accelerate env command") + + parser.add_argument( + "--config_file", default=None, help="The config file to use for the default values in the launching script." + ) + + if subparsers is not None: + parser.set_defaults(func=env_command) + return parser + + +def env_command(args): + pt_version = torch.__version__ + pt_cuda_available = torch.cuda.is_available() + pt_xpu_available = is_xpu_available() + pt_mlu_available = is_mlu_available() + pt_npu_available = is_npu_available() + + accelerate_config = "Not found" + # Get the default from the config file. + if args.config_file is not None or os.path.isfile(default_config_file): + accelerate_config = load_config_from_file(args.config_file).to_dict() + + # if we can run which, get it + command = None + bash_location = "Not found" + if os.name == "nt": + command = ["where", "accelerate"] + elif os.name == "posix": + command = ["which", "accelerate"] + if command is not None: + bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip() + info = { + "`Accelerate` version": version, + "Platform": platform.platform(), + "`accelerate` bash location": bash_location, + "Python version": platform.python_version(), + "Numpy version": np.__version__, + "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", + "PyTorch XPU available": str(pt_xpu_available), + "PyTorch NPU available": str(pt_npu_available), + "PyTorch MLU available": str(pt_mlu_available), + "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB", + } + if pt_cuda_available: + info["GPU type"] = torch.cuda.get_device_name() + + print("\nCopy-and-paste the text below in your GitHub issue\n") + print("\n".join([f"- {prop}: {val}" for prop, val in info.items()])) + + print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:") + accelerate_config_str = ( + "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()]) + if isinstance(accelerate_config, dict) + else f"\t{accelerate_config}" + ) + print(accelerate_config_str) + + info["`Accelerate` configs"] = accelerate_config + + return info + + +def main() -> int: + parser = env_command_parser() + args = parser.parse_args() + env_command(args) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/estimate.py b/venv/lib/python3.10/site-packages/accelerate/commands/estimate.py new file mode 100644 index 0000000000000000000000000000000000000000..56da3c5ad9e953687fab71dfc1fb0a878309d1d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/estimate.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python + +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from huggingface_hub import model_info +from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError + +from accelerate import init_empty_weights +from accelerate.commands.utils import CustomArgumentParser +from accelerate.utils import ( + calculate_maximum_sizes, + convert_bytes, + is_timm_available, + is_transformers_available, +) + + +if is_transformers_available(): + import transformers + from transformers import AutoConfig, AutoModel + +if is_timm_available(): + import timm + + +def verify_on_hub(repo: str, token: str = None): + "Verifies that the model is on the hub and returns the model info." + try: + return model_info(repo, token=token) + except GatedRepoError: + return "gated" + except RepositoryNotFoundError: + return "repo" + + +def check_has_model(error): + """ + Checks what library spawned `error` when a model is not found + """ + if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]: + return "timm" + elif ( + is_transformers_available() + and isinstance(error, OSError) + and "does not appear to have a file named" in error.args[0] + ): + return "transformers" + else: + return "unknown" + + +def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None): + """ + Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption. + + Args: + model_name (`str`): + The model name on the Hub + library_name (`str`): + The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no + metadata on the Hub to determine the library. + trust_remote_code (`bool`, `optional`, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + access_token (`str`, `optional`, defaults to `None`): + The access token to use to access private or gated models on the Hub. (for use on the Gradio app) + + Returns: + `torch.nn.Module`: The torch model that has been initialized on the `meta` device. + + """ + model_info = verify_on_hub(model_name, access_token) + # Simplified errors + if model_info == "gated": + raise GatedRepoError( + f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`." + ) + elif model_info == "repo": + raise RepositoryNotFoundError( + f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo," + " make sure you are authenticated via `huggingface-cli login` and have access." 
+ ) + if library_name is None: + library_name = getattr(model_info, "library_name", False) + if not library_name: + raise ValueError( + f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)" + ) + if library_name == "transformers": + if not is_transformers_available(): + raise ImportError( + f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`" + ) + print(f"Loading pretrained config for `{model_name}` from `transformers`...") + if model_info.config is None: + raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.") + + auto_map = model_info.config.get("auto_map", False) + config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token) + with init_empty_weights(): + # remote code could specify a specific `AutoModel` class in the `auto_map` + constructor = AutoModel + if isinstance(auto_map, dict): + value = None + for key in auto_map.keys(): + if key.startswith("AutoModelFor"): + value = key + break + if value is not None: + constructor = getattr(transformers, value) + model = constructor.from_config(config, trust_remote_code=trust_remote_code) + elif library_name == "timm": + if not is_timm_available(): + raise ImportError( + f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`" + ) + print(f"Loading pretrained config for `{model_name}` from `timm`...") + with init_empty_weights(): + model = timm.create_model(model_name, pretrained=False) + else: + raise ValueError( + f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support." + ) + return model + + +def create_ascii_table(headers: list, rows: list, title: str): + "Creates a pretty table from a list of rows, minimal version of `tabulate`." 
+ sep_char, in_between = "│", "─" + column_widths = [] + for i in range(len(headers)): + column_values = [row[i] for row in rows] + [headers[i]] + max_column_width = max(len(value) for value in column_values) + column_widths.append(max_column_width) + + formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))] + + pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}" + diff = 0 + + def make_row(left_char, middle_char, right_char): + return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}" + + separator = make_row("├", "┼", "┤") + if len(title) > sum(column_widths): + diff = abs(len(title) - len(separator)) + column_widths[-1] += diff + + # Update with diff + separator = make_row("├", "┼", "┤") + initial_rows = [ + make_row("┌", in_between, "┐"), + f"{sep_char}{title.center(len(separator) - 2)}{sep_char}", + make_row("├", "┬", "┤"), + ] + table = "\n".join(initial_rows) + "\n" + column_widths[-1] += diff + centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)] + table += f"{pattern % tuple(centered_line)}\n{separator}\n" + for i, line in enumerate(rows): + centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)] + table += f"{pattern % tuple(centered_line)}\n" + table += f'└{"┴".join([in_between * n for n in column_widths])}┘' + + return table + + +def estimate_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("estimate-memory") + else: + parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.") + + parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.") + parser.add_argument( + "--library_name", + type=str, + help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.", + choices=["timm", "transformers"], + ) + parser.add_argument( + "--dtypes", + type=str, + nargs="+", + default=["float32", "float16", "int8", "int4"], + help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`", + choices=["float32", "float16", "int8", "int4"], + ) + parser.add_argument( + "--trust_remote_code", + action="store_true", + help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag + should only be used for repositories you trust and in which you have read the code, as it will execute + code present on the Hub on your local machine.""", + default=False, + ) + + if subparsers is not None: + parser.set_defaults(func=estimate_command) + return parser + + +def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict: + """ + Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of + 1. + + Args: + bytes (`int`): + The size of the model being trained. + mixed_precision (`str`): + The mixed precision that would be ran. + msamp_config (`str`): + The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`. 
+ """ + memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1} + fp32_size = bytes + fp16_size = bytes // 2 + + if mixed_precision == "float32": + memory_sizes["model"] = fp32_size + memory_sizes["gradients"] = fp32_size + memory_sizes["optimizer"] = fp32_size * 2 + memory_sizes["step"] = fp32_size * 4 + elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None): + # With native `TransformersEngine`, there is no memory savings with FP8 + # With mixed precision training, the model has weights stored + # in FP16 and FP32 + memory_sizes["model"] = fp32_size + # 1.5 from weight gradient + computation (GEMM) + memory_sizes["gradients"] = fp32_size + fp16_size + # 2x from optimizer states + memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states + memory_sizes["step"] = memory_sizes["optimizer"] + return memory_sizes + + +def gather_data(args): + "Creates an empty model and gathers the data for the sizes" + try: + model = create_empty_model( + args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code + ) + except (RuntimeError, OSError) as e: + library = check_has_model(e) + if library != "unknown": + raise RuntimeError( + f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo." + ) + raise e + + total_size, largest_layer = calculate_maximum_sizes(model) + + data = [] + + for dtype in args.dtypes: + dtype_total_size = total_size + dtype_largest_layer = largest_layer[0] + dtype_training_size = estimate_training_usage(dtype_total_size, dtype) + if dtype == "float16": + dtype_total_size /= 2 + dtype_largest_layer /= 2 + elif dtype == "int8": + dtype_total_size /= 4 + dtype_largest_layer /= 4 + elif dtype == "int4": + dtype_total_size /= 8 + dtype_largest_layer /= 8 + data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size]) + return data + + +def estimate_command(args): + data = gather_data(args) + for row in data: + for i, item in enumerate(row): + if isinstance(item, (int, float)): + row[i] = convert_bytes(item) + elif isinstance(item, dict): + training_usage = max(item.values()) + row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A" + + headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"] + + title = f"Memory Usage for loading `{args.model_name}`" + table = create_ascii_table(headers, data, title) + print(table) + + +def main(): + parser = estimate_command_parser() + args = parser.parse_args() + estimate_command(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/launch.py b/venv/lib/python3.10/site-packages/accelerate/commands/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..f6b91654bc63c3cd0db9cca5f72be511458a20fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/launch.py @@ -0,0 +1,1085 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import importlib +import logging +import os +import subprocess +import sys +from pathlib import Path + +import psutil +import torch + +from accelerate.commands.config import default_config_file, load_config_from_file +from accelerate.commands.config.config_args import SageMakerConfig +from accelerate.commands.config.config_utils import DYNAMO_BACKENDS +from accelerate.commands.utils import CustomArgumentParser +from accelerate.state import get_int_from_env +from accelerate.utils import ( + ComputeEnvironment, + DistributedType, + PrepareForLaunch, + _filter_args, + check_cuda_p2p_ib_support, + convert_dict_to_env_variables, + is_bf16_available, + is_deepspeed_available, + is_mlu_available, + is_npu_available, + is_rich_available, + is_sagemaker_available, + is_torch_version, + is_torch_xla_available, + is_xpu_available, + patch_environment, + prepare_deepspeed_cmd_env, + prepare_multi_gpu_env, + prepare_sagemager_args_inputs, + prepare_simple_launcher_cmd_env, + prepare_tpu, +) +from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES + + +if is_rich_available(): + from rich import get_console + from rich.logging import RichHandler + + FORMAT = "%(message)s" + logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]) + + +logger = logging.getLogger(__name__) + + +options_to_group = { + "multi_gpu": "Distributed GPUs", + "tpu": "TPU", + "use_deepspeed": "DeepSpeed Arguments", + "use_fsdp": "FSDP Arguments", + "use_megatron_lm": "Megatron-LM Arguments", +} + + +def clean_option(option): + "Finds all cases of - after the first two characters and changes them to _" + if option.startswith("--"): + return option[2:].replace("-", "_") + + +class CustomHelpFormatter(argparse.HelpFormatter): + """ + This is a custom help formatter that will hide all arguments that are not used in the command line when the help is + called. This is useful for the case where the user is using a specific platform and only wants to see the arguments + for that platform. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.titles = [ + "Hardware Selection Arguments", + "Resource Selection Arguments", + "Training Paradigm Arguments", + "positional arguments", + "optional arguments", + ] + + def add_argument(self, action: argparse.Action): + if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]: + args = sys.argv[2:] + else: + args = sys.argv[1:] + + if len(args) > 1: + args = list(map(clean_option, args)) + used_platforms = [arg for arg in args if arg in options_to_group.keys()] + used_titles = [options_to_group[o] for o in used_platforms] + if action.container.title not in self.titles + used_titles: + action.help = argparse.SUPPRESS + elif action.container.title == "Hardware Selection Arguments": + if set(action.option_strings).isdisjoint(set(args)): + action.help = argparse.SUPPRESS + else: + action.help = action.help + " (currently selected)" + elif action.container.title == "Training Paradigm Arguments": + if set(action.option_strings).isdisjoint(set(args)): + action.help = argparse.SUPPRESS + else: + action.help = action.help + " (currently selected)" + + action.option_strings = [s for s in action.option_strings if "-" not in s[2:]] + super().add_argument(action) + + def end_section(self): + if len(self._current_section.items) < 2: + self._current_section.items = [] + self._current_section.heading = "" + super().end_section() + + +def launch_command_parser(subparsers=None): + description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)" + if subparsers is not None: + parser = subparsers.add_parser( + "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter + ) + else: + parser = CustomArgumentParser( + "Accelerate launch command", + description=description, + add_help=False, + allow_abbrev=False, + formatter_class=CustomHelpFormatter, + ) + + parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") + + parser.add_argument( + "--config_file", + default=None, + help="The config file to use for the default values in the launching script.", + ) + parser.add_argument( + "--quiet", + "-q", + action="store_true", + help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", + ) + # Hardware selection arguments + hardware_args = parser.add_argument_group( + "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." + ) + hardware_args.add_argument( + "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." + ) + hardware_args.add_argument( + "--multi_gpu", + default=False, + action="store_true", + help="Whether or not this should launch a distributed GPU training.", + ) + hardware_args.add_argument( + "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." + ) + hardware_args.add_argument( + "--ipex", + default=False, + action="store_true", + help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.", + ) + + # Resource selection arguments + resource_args = parser.add_argument_group( + "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." 
) + resource_args.add_argument( + "--mixed_precision", + type=str, + choices=["no", "fp16", "bf16", "fp8"], + help="Whether or not to use mixed precision training. " + "Choose between FP16 and BF16 (bfloat16) training. " + "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", + ) + resource_args.add_argument( + "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." + ) + resource_args.add_argument( + "--num_machines", type=int, default=None, help="The total number of machines used in this training." + ) + resource_args.add_argument( + "--num_cpu_threads_per_process", + type=int, + default=None, + help="The number of CPU threads per process. Can be tuned for optimal performance.", + ) + resource_args.add_argument( + "--enable_cpu_affinity", + default=False, + action="store_true", + help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.", + ) + + # Dynamo arguments + resource_args.add_argument( + "--dynamo_backend", + type=str, + choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], + help="Choose a backend to optimize your training with dynamo, see more at " + "https://github.com/pytorch/torchdynamo.", + ) + resource_args.add_argument( + "--dynamo_mode", + type=str, + default="default", + choices=TORCH_DYNAMO_MODES, + help="Choose a mode to optimize your training with dynamo.", + ) + resource_args.add_argument( + "--dynamo_use_fullgraph", + default=False, + action="store_true", + help="Whether to use full graph mode for dynamo, or whether it is OK to break the model into several subgraphs", + ) + resource_args.add_argument( + "--dynamo_use_dynamic", + default=False, + action="store_true", + help="Whether to enable dynamic shape tracing.", + ) + + # Training Paradigm arguments + paradigm_args = parser.add_argument_group( + "Training Paradigm Arguments", "Arguments for selecting which training paradigm to use." + ) + paradigm_args.add_argument( + "--use_deepspeed", + default=False, + action="store_true", + help="Whether to use deepspeed.", + ) + paradigm_args.add_argument( + "--use_fsdp", + default=False, + action="store_true", + help="Whether to use fsdp.", + ) + paradigm_args.add_argument( + "--use_megatron_lm", + default=False, + action="store_true", + help="Whether to use Megatron-LM.", + ) + paradigm_args.add_argument( + "--use_xpu", + default=False, + action="store_true", + help="Whether to use IPEX plugin to speed up training on XPU specifically.", + ) + + # distributed GPU training arguments + distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") + distributed_args.add_argument( + "--gpu_ids", + default=None, + help="What GPUs (by id) should be used for training on this machine as a comma-separated list", + ) + distributed_args.add_argument( + "--same_network", + default=False, + action="store_true", + help="Whether all machines used for multinode training exist on the same local network.", + ) + distributed_args.add_argument( + "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." + ) + distributed_args.add_argument( + "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0." 
) + distributed_args.add_argument( + "--main_process_port", + type=int, + default=None, + help="The port to use to communicate with the machine of rank 0.", + ) + distributed_args.add_argument( + "-t", + "--tee", + default="0", + type=str, + help="Tee std streams into a log file and also to console.", + ) + distributed_args.add_argument( + "--role", + type=str, + default="default", + help="User-defined role for the workers.", + ) + # Rendezvous related arguments + distributed_args.add_argument( + "--rdzv_backend", + type=str, + default="static", + help="The rendezvous method to use, such as 'static' (the default) or 'c10d'", + ) + distributed_args.add_argument( + "--rdzv_conf", + type=str, + default="", + help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).", + ) + distributed_args.add_argument( + "--max_restarts", + type=int, + default=0, + help="Maximum number of worker group restarts before failing.", + ) + distributed_args.add_argument( + "--monitor_interval", + type=float, + default=5, + help="Interval, in seconds, to monitor the state of workers.", + ) + parser.add_argument( + "-m", + "--module", + action="store_true", + help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", + ) + parser.add_argument( + "--no_python", + action="store_true", + help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.", + ) + + # TPU arguments + tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.") + tpu_args.add_argument( + "--tpu_cluster", + action="store_true", + dest="tpu_use_cluster", + help="Whether to use a GCP TPU pod for training.", + ) + tpu_args.add_argument( + "--no_tpu_cluster", + action="store_false", + dest="tpu_use_cluster", + help="Should not be passed explicitly, this is for internal use only.", + ) + tpu_args.add_argument( + "--tpu_use_sudo", + action="store_true", + help="Whether to use `sudo` when running the TPU training script in each pod.", + ) + tpu_args.add_argument( + "--vm", + type=str, + action="append", + help=( + "List of single Compute VM instance names. " + "If not provided, we assume usage of instance groups. For TPU pods." + ), + ) + tpu_args.add_argument( + "--env", + type=str, + action="append", + help="List of environment variables to set on the Compute VM instances. For TPU pods.", + ) + tpu_args.add_argument( + "--main_training_function", + type=str, + default=None, + help="The name of the main function to be executed in your script (only for TPU training).", + ) + tpu_args.add_argument( + "--downcast_bf16", + action="store_true", + help="Whether, when using bf16 precision on TPUs, both float and double tensors are cast to bfloat16, or double tensors remain as float32.", + ) + + # DeepSpeed arguments + deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.") + deepspeed_args.add_argument( + "--deepspeed_config_file", + default=None, + type=str, + help="DeepSpeed config file.", + ) + deepspeed_args.add_argument( + "--zero_stage", + default=None, + type=int, + help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `2`.", + ) + deepspeed_args.add_argument( + "--offload_optimizer_device", + default=None, + type=str, + help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). 
" + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_param_device", + default=None, + type=str, + help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_optimizer_nvme_path", + default=None, + type=str, + help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_param_nvme_path", + default=None, + type=str, + help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--gradient_accumulation_steps", + default=None, + type=int, + help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `1`.", + ) + deepspeed_args.add_argument( + "--gradient_clipping", + default=None, + type=float, + help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `1.0`.", + ) + deepspeed_args.add_argument( + "--zero3_init_flag", + default=None, + type=str, + help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. " + "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.", + ) + deepspeed_args.add_argument( + "--zero3_save_16bit_model", + default=None, + type=str, + help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. " + "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.", + ) + deepspeed_args.add_argument( + "--deepspeed_hostfile", + default=None, + type=str, + help="DeepSpeed hostfile for configuring multi-node compute resources.", + ) + deepspeed_args.add_argument( + "--deepspeed_exclusion_filter", + default=None, + type=str, + help="DeepSpeed exclusion filter string when using mutli-node setup.", + ) + deepspeed_args.add_argument( + "--deepspeed_inclusion_filter", + default=None, + type=str, + help="DeepSpeed inclusion filter string when using mutli-node setup.", + ) + deepspeed_args.add_argument( + "--deepspeed_multinode_launcher", + default=None, + type=str, + help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.", + ) + + # fsdp arguments + fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.") + fsdp_args.add_argument( + "--fsdp_offload_params", + default="false", + type=str, + help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_min_num_params", + type=int, + default=1e8, + help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_sharding_strategy", + type=str, + default="FULL_SHARD", + help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_auto_wrap_policy", + type=str, + default=None, + help="FSDP's auto wrap policy. 
(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_transformer_layer_cls_to_wrap", + default=None, + type=str, + help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... " + "(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_backward_prefetch_policy", + default=None, + type=str, + help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use `fsdp_backward_prefetch` instead.", + ) + fsdp_args.add_argument( + "--fsdp_backward_prefetch", + default=None, + type=str, + help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_state_dict_type", + default=None, + type=str, + help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_forward_prefetch", + default="false", + type=str, + help="If True, then FSDP explicitly prefetches the next upcoming " + "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_use_orig_params", + default="true", + type=str, + help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres." + " (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_cpu_ram_efficient_loading", + default="true", + type=str, + help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " + "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. " + "(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_sync_module_states", + default="true", + type=str, + help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0." + " (useful only when `use_fsdp` flag is passed).", + ) + + # megatron_lm args + megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.") + megatron_lm_args.add_argument( + "--megatron_lm_tp_degree", + type=int, + default=1, + help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_pp_degree", + type=int, + default=1, + help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_num_micro_batches", + type=int, + default=None, + help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_sequence_parallelism", + default=None, + type=str, + help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. " + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_recompute_activations", + default=None, + type=str, + help="Decides Whether (true|false) to enable Selective Activation Recomputation. " + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_use_distributed_optimizer", + default=None, + type=str, + help="Decides Whether (true|false) to use distributed optimizer " + "which shards optimizer state and gradients across Data Pralellel (DP) ranks. 
" + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_gradient_clipping", + default=1.0, + type=float, + help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). " + "(useful only when `use_megatron_lm` flag is passed).", + ) + + # AWS arguments + aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") + aws_args.add_argument( + "--aws_access_key_id", + type=str, + default=None, + help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", + ) + aws_args.add_argument( + "--aws_secret_access_key", + type=str, + default=None, + help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", + ) + parser.add_argument( + "--debug", + action="store_true", + help="Whether to print out the torch.distributed stack trace when something fails.", + ) + parser.add_argument( + "training_script", + type=str, + help=( + "The full path to the script to be launched in parallel, followed by all the arguments for the training " + "script." + ), + ) + + # MPI arguments + mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU") + mpirun_args.add_argument( + "--mpirun_hostfile", + type=str, + default=None, + help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will " + "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.", + ) + mpirun_args.add_argument( + "--mpirun_ccl", + type=int, + default=1, + help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.", + ) + + # Other arguments of the training scripts + parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") + + if subparsers is not None: + parser.set_defaults(func=launch_command) + return parser + + +def simple_launcher(args): + cmd, current_env = prepare_simple_launcher_cmd_env(args) + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + + +def multi_gpu_launcher(args): + import torch.distributed.run as distrib_run + + current_env = prepare_multi_gpu_env(args) + if not check_cuda_p2p_ib_support(): + message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." 
+ warn = False + if "NCCL_P2P_DISABLE" not in current_env: + current_env["NCCL_P2P_DISABLE"] = "1" + warn = True + if "NCCL_IB_DISABLE" not in current_env: + current_env["NCCL_IB_DISABLE"] = "1" + warn = True + if warn: + logger.warning(message) + + debug = getattr(args, "debug", False) + args = _filter_args( + args, + distrib_run.get_args_parser(), + ["--training_script", args.training_script, "--training_script_args", args.training_script_args], + ) + + with patch_environment(**current_env): + try: + distrib_run.run(args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def deepspeed_launcher(args): + import torch.distributed.run as distrib_run + + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.") + else: + from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME + + cmd, current_env = prepare_deepspeed_cmd_env(args) + if not check_cuda_p2p_ib_support(): + message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." + warn = False + if "NCCL_P2P_DISABLE" not in current_env: + current_env["NCCL_P2P_DISABLE"] = "1" + warn = True + if "NCCL_IB_DISABLE" not in current_env: + current_env["NCCL_IB_DISABLE"] = "1" + warn = True + if warn: + logger.warning(message) + + if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f: + valid_env_items = convert_dict_to_env_variables(current_env) + if len(valid_env_items) > 1: + f.writelines(valid_env_items) + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + else: + debug = getattr(args, "debug", False) + args = _filter_args( + args, + distrib_run.get_args_parser(), + ["--training_script", args.training_script, "--training_script_args", args.training_script_args], + ) + with patch_environment(**current_env): + try: + distrib_run.run(args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def tpu_launcher(args): + import torch_xla.distributed.xla_multiprocessing as xmp + + if args.no_python: + raise ValueError("--no_python cannot be used with TPU launcher") + + args, current_env = prepare_tpu(args, {}) + + if args.module: + mod_name = args.training_script + else: + # Import training_script as a module + script_path = Path(args.training_script) + sys.path.append(str(script_path.parent.resolve())) + mod_name = script_path.stem + + mod = importlib.import_module(mod_name) + if not hasattr(mod, args.main_training_function): + raise ValueError( + f"Your training script should have a function named {args.main_training_function}, or you should pass a " + "different value to `--main_training_function`." 
+ ) + + # Patch sys.argv + sys.argv = [mod.__file__] + args.training_script_args + + main_function = getattr(mod, args.main_training_function) + with patch_environment(**current_env): + xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes) + + +def tpu_pod_launcher(args): + from torch_xla.distributed import xla_dist + + current_env = {} + args, current_env = prepare_tpu(args, current_env, True) + debug = getattr(args, "debug", False) + + training_script = args.training_script + training_script_args = args.training_script_args + new_args = _filter_args( + args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"] + ) + + if args.tpu_use_sudo: + new_cmd = ["sudo"] + else: + new_cmd = [] + + new_cmd += [ + "accelerate-launch", + "--tpu", + "--no_tpu_cluster", + "--num_machines", + "1", + "--mixed_precision", + "no", + "--dynamo_backend", + "no", + "--num_processes", + str(args.num_processes), + "--main_training_function", + str(args.main_training_function), + training_script, + ] + training_script_args + + new_args.positional = new_cmd + bad_flags = "" + for arg in vars(new_args): + if arg.startswith("docker_"): + value = getattr(new_args, arg) + if value != "" and value is not None: + bad_flags += f'{arg}="{value}"\n' + if bad_flags != "": + raise ValueError( + f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}" + ) + new_args.env = [f"{k}={v}" for k, v in current_env.items()] + new_args.env.append("ACCELERATE_IN_TPU_POD=1") + try: + xla_dist.resolve_and_execute(new_args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def sagemaker_launcher(sagemaker_config: SageMakerConfig, args): + if not is_sagemaker_available(): + raise ImportError( + "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`" + ) + if args.module or args.no_python: + raise ValueError( + "SageMaker requires a python training script file and cannot be used with --module or --no_python" + ) + + from sagemaker.huggingface import HuggingFace + + args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args) + + huggingface_estimator = HuggingFace(**args) + + huggingface_estimator.fit(inputs=sagemaker_inputs) + print(f"You can find your model data at: {huggingface_estimator.model_data}") + + +def _validate_launch_command(args): + # Sanity checks + if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1: + raise ValueError( + "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time." + ) + if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2): + raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.") + + defaults = None + warned = [] + mp_from_config_flag = False + # Get the default from the config file. 
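+ # Note: defaults are only consulted when a config file is available; an explicit
+ # `--config_file` always wins, otherwise the default config file is used unless `--cpu` is
+ # passed, and values already given on the command line are never overwritten (see the
+ # `getattr(args, name, None) is None` guard below). For example, a hypothetical invocation
+ # such as `accelerate launch --num_processes 2 train.py` keeps `num_processes=2` even if
+ # the config file says otherwise.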
+ if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu: + defaults = load_config_from_file(args.config_file) + if ( + not args.multi_gpu + and not args.tpu + and not args.tpu_use_cluster + and not args.use_deepspeed + and not args.use_fsdp + and not args.use_megatron_lm + ): + args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED + args.multi_gpu = ( + True + if defaults.distributed_type + in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_XPU, + ) + else False + ) + args.tpu = defaults.distributed_type == DistributedType.XLA + args.use_fsdp = defaults.distributed_type == DistributedType.FSDP + args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM + args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False + if args.gpu_ids is None: + if defaults.gpu_ids is not None: + args.gpu_ids = defaults.gpu_ids + else: + args.gpu_ids = "all" + + if args.multi_gpu and args.num_machines is None: + args.num_machines = defaults.num_machines + + if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1: + raise ValueError( + "Less than two GPU ids were configured and tried to run on multiple GPUs. " + "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`." + ) + if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE: + # Update args with the defaults + for name, attr in defaults.__dict__.items(): + if isinstance(attr, dict): + for k in defaults.deepspeed_config: + setattr(args, k, defaults.deepspeed_config[k]) + for k in defaults.fsdp_config: + arg_to_set = k + if "fsdp" not in arg_to_set: + arg_to_set = "fsdp_" + arg_to_set + setattr(args, arg_to_set, defaults.fsdp_config[k]) + for k in defaults.megatron_lm_config: + setattr(args, k, defaults.megatron_lm_config[k]) + for k in defaults.dynamo_config: + setattr(args, k, defaults.dynamo_config[k]) + for k in defaults.ipex_config: + setattr(args, k, defaults.ipex_config[k]) + for k in defaults.mpirun_config: + setattr(args, k, defaults.mpirun_config[k]) + continue + + # Those args are handled separately + if ( + name not in ["compute_environment", "mixed_precision", "distributed_type"] + and getattr(args, name, None) is None + ): + setattr(args, name, attr) + if not args.debug: + args.debug = defaults.debug + + if not args.mixed_precision: + if defaults.mixed_precision is None: + args.mixed_precision = "no" + else: + args.mixed_precision = defaults.mixed_precision + mp_from_config_flag = True + else: + if args.use_cpu or (args.use_xpu and torch.xpu.is_available()): + native_amp = is_torch_version(">=", "1.10") + else: + native_amp = is_bf16_available(True) + if ( + args.mixed_precision == "bf16" + and not native_amp + and not (args.tpu and is_torch_xla_available(check_is_tpu=True)) + ): + raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") + + # Silently set the default here + if args.dynamo_backend is None: + args.dynamo_backend = "no" + else: + if args.num_processes is None: + if args.use_xpu and is_xpu_available(): + args.num_processes = torch.xpu.device_count() + elif is_mlu_available(): + args.num_processes = torch.mlu.device_count() + elif is_npu_available(): + args.num_processes = torch.npu.device_count() + else: + args.num_processes = torch.cuda.device_count() + warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`") + if args.debug
is None: + args.debug = False + if not args.multi_gpu and ( + (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1) + or (is_mlu_available() and torch.mlu.device_count() > 1) + or (is_npu_available() and torch.npu.device_count() > 1) + or (torch.cuda.device_count() > 1) + ): + warned.append( + "\t\tMore than one GPU was found, enabling multi-GPU training.\n" + "\t\tIf this was unintended please pass in `--num_processes=1`." + ) + args.multi_gpu = True + if args.num_machines is None: + warned.append("\t`--num_machines` was set to a value of `1`") + args.num_machines = 1 + if args.mixed_precision is None: + warned.append("\t`--mixed_precision` was set to a value of `'no'`") + args.mixed_precision = "no" + if not hasattr(args, "use_cpu"): + args.use_cpu = args.cpu + if args.dynamo_backend is None: + warned.append("\t`--dynamo_backend` was set to a value of `'no'`") + args.dynamo_backend = "no" + if args.debug: + logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.") + + is_aws_env_disabled = defaults is None or ( + defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER + ) + if is_aws_env_disabled and args.num_cpu_threads_per_process is None: + args.num_cpu_threads_per_process = 1 + if args.use_cpu and args.num_processes >= 1: + local_size = get_int_from_env( + ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1 + ) + threads_per_process = int(psutil.cpu_count(logical=False) / local_size) + if threads_per_process > 1: + args.num_cpu_threads_per_process = threads_per_process + warned.append( + f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs" + ) + + if any(warned): + message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n" + message += "\n".join(warned) + message += ( + "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`." 
+ ) + logger.warning(message) + return args, defaults, mp_from_config_flag + + +def launch_command(args): + args, defaults, mp_from_config_flag = _validate_launch_command(args) + # Use the proper launcher + if args.use_deepspeed and not args.cpu: + args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else [] + if mp_from_config_flag: + args.deepspeed_fields_from_accelerate_config.append("mixed_precision") + args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config) + deepspeed_launcher(args) + elif args.use_fsdp and not args.cpu: + multi_gpu_launcher(args) + elif args.use_megatron_lm and not args.cpu: + multi_gpu_launcher(args) + elif args.multi_gpu and not args.cpu: + multi_gpu_launcher(args) + elif args.tpu and not args.cpu: + if args.tpu_use_cluster: + tpu_pod_launcher(args) + else: + tpu_launcher(args) + elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + sagemaker_launcher(defaults, args) + else: + simple_launcher(args) + + +def main(): + parser = launch_command_parser() + args = parser.parse_args() + launch_command(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c851cc0b192ab8207d3fa68d7409868c84354c --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
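+ # Note: a minimal usage sketch for the menu exported below, assuming an interactive
+ # terminal; the prompt and choices are illustrative only:
+ #
+ #     from accelerate.commands.menu import BulletMenu
+ #     index = BulletMenu("Mixed precision?", ["no", "fp16", "bf16"]).run(default_choice=0)
+ #
+ # `run()` returns the integer index of the selected choice (arrow or number keys,
+ # confirmed with Enter).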
+from .selection_menu import BulletMenu diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..240aed5c5de7a68659203682584cb1740fa47ac8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6c91eae3072177aafe7fe69a95a1c514316ccbf Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f7aa086987cc720f3d0415a13aea4cb715347a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2159b7eac6dd89bb5ddcf97489bede092b25d66b Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb4746121017cac530d0bfb8d800b47055d40ffe Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..162cf396928081aca1e3144536ba63d9b6716380 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f0bb7b68025ae4fe0c2c76c095eb36b4e64f2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py @@ -0,0 +1,65 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet +""" + +import os +import sys +from contextlib import contextmanager + + +# Windows only +if os.name == "nt": + import ctypes + import msvcrt # noqa + + class CursorInfo(ctypes.Structure): + # _fields is a specific attr expected by ctypes + _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] + + +def hide_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = False + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25l") + sys.stdout.flush() + + +def show_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = True + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25h") + sys.stdout.flush() + + +@contextmanager +def hide(): + "Context manager to hide the terminal cursor" + try: + hide_cursor() + yield + finally: + show_cursor() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..de46f37ddcf4591167e3e01791391e4b1729034f --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py @@ -0,0 +1,59 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
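+ # Note: the helpers below write raw ANSI escape sequences to stdout: `move_cursor(2, "UP")`
+ # emits "\033[2A", `writeColor("done", 32)` wraps the text in green SGR codes, and
+ # `clear_line()` overwrites the current line with spaces up to the terminal width captured
+ # at import time.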
+ +""" +A variety of helper functions and constants when dealing with terminal menu choices, based on +https://github.com/bchao1/bullet +""" + +import enum +import shutil +import sys + + +TERMINAL_WIDTH, _ = shutil.get_terminal_size() + +CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} + + +class Direction(enum.Enum): + UP = 0 + DOWN = 1 + + +def forceWrite(content, end=""): + sys.stdout.write(str(content) + end) + sys.stdout.flush() + + +def writeColor(content, color, end=""): + forceWrite(f"\u001b[{color}m{content}\u001b[0m", end) + + +def reset_cursor(): + forceWrite("\r") + + +def move_cursor(num_lines: int, direction: str): + forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}") + + +def clear_line(): + forceWrite(" " * TERMINAL_WIDTH) + reset_cursor() + + +def linebreak(): + reset_cursor() + forceWrite("-" * TERMINAL_WIDTH) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py new file mode 100644 index 0000000000000000000000000000000000000000..2690f86aa61f7ac648f4a9c2040a34ee35147201 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/input.py @@ -0,0 +1,86 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +This file contains utilities for handling input from the user and registering specific keys to specific functions, +based on https://github.com/bchao1/bullet +""" + +from typing import List + +from .keymap import KEYMAP, get_character + + +def mark(key: str): + """ + Mark the function with the key code so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += [key] + func.handle_key = handle + return func + + return decorator + + +def mark_multiple(*keys: List[str]): + """ + Mark the function with the key codes so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += keys + func.handle_key = handle + return func + + return decorator + + +class KeyHandler(type): + """ + Metaclass that adds the key handlers to the class + """ + + def __new__(cls, name, bases, attrs): + new_cls = super().__new__(cls, name, bases, attrs) + if not hasattr(new_cls, "key_handler"): + new_cls.key_handler = {} + new_cls.handle_input = KeyHandler.handle_input + + for value in attrs.values(): + handled_keys = getattr(value, "handle_key", []) + for key in handled_keys: + new_cls.key_handler[key] = value + return new_cls + + @staticmethod + def handle_input(cls): + "Finds and returns the selected character if it exists in the handler" + char = get_character() + if char != KEYMAP["undefined"]: + char = ord(char) + handler = cls.key_handler.get(char) + if handler: + cls.current_selection = char + return handler(cls) + else: + return None + + +def register(cls): + """Adds KeyHandler metaclass to the class""" + return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy()) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py new file mode 100644 index 0000000000000000000000000000000000000000..787db12860fe21c6786dda69c34fcccab114f2f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py @@ -0,0 +1,133 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet +""" + +import os +import string +import sys + + +ARROW_KEY_FLAG = 1 << 8 + +KEYMAP = { + "tab": ord("\t"), + "newline": ord("\r"), + "esc": 27, + "up": 65 + ARROW_KEY_FLAG, + "down": 66 + ARROW_KEY_FLAG, + "right": 67 + ARROW_KEY_FLAG, + "left": 68 + ARROW_KEY_FLAG, + "mod_int": 91, + "undefined": sys.maxsize, + "interrupt": 3, + "insert": 50, + "delete": 51, + "pg_up": 53, + "pg_down": 54, +} + +KEYMAP["arrow_begin"] = KEYMAP["up"] +KEYMAP["arrow_end"] = KEYMAP["left"] + +if sys.platform == "win32": + WIN_CH_BUFFER = [] + WIN_KEYMAP = { + b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, + b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, + b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, + b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, + b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, + b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, + b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, + b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, + } + +for i in range(10): + KEYMAP[str(i)] = ord(str(i)) + + +def get_raw_chars(): + "Gets raw characters from inputs" + if os.name == "nt": + import msvcrt + + encoding = "mbcs" + # Flush the keyboard buffer + while msvcrt.kbhit(): + msvcrt.getch() + if len(WIN_CH_BUFFER) == 0: + # Read the keystroke + ch = msvcrt.getch() + + # If it is a prefix char, get second part + if ch in (b"\x00", b"\xe0"): + ch2 = ch + msvcrt.getch() + # Translate actual Win chars to bullet char types + try: + chx = chr(WIN_KEYMAP[ch2]) + WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"])) + WIN_CH_BUFFER.append(chx) + if ord(chx) in ( + KEYMAP["insert"] - 1 << 9, + KEYMAP["delete"] - 1 << 9, + KEYMAP["pg_up"] - 1 << 9, + KEYMAP["pg_down"] - 1 << 9, + ): + WIN_CH_BUFFER.append(chr(126)) + ch = chr(KEYMAP["esc"]) + except KeyError: + ch = ch2[1] + else: + ch = ch.decode(encoding) + else: + ch = WIN_CH_BUFFER.pop(0) + elif os.name == "posix": + import termios + import tty + + fd = sys.stdin.fileno() + old_settings = termios.tcgetattr(fd) + try: + tty.setraw(fd) + ch = sys.stdin.read(1) + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + return ch + + +def get_character(): + "Gets a character from the keyboard and returns the key code" + char = get_raw_chars() + if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]: + return char + + elif ord(char) == KEYMAP["esc"]: + combo = get_raw_chars() + if ord(combo) == KEYMAP["mod_int"]: + key = get_raw_chars() + if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: + return chr(ord(key) + ARROW_KEY_FLAG) + else: + return KEYMAP["undefined"] + else: + return get_raw_chars() + + else: + if char in string.printable: + return char + else: + return KEYMAP["undefined"] diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py b/venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py new file mode 100644 index 0000000000000000000000000000000000000000..ee9a771a54ef666ee46b67ae6c75fb957d49efdd --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py @@ -0,0 +1,144 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Main driver for the selection menu, based on https://github.com/bchao1/bullet +""" + +import builtins +import sys + +from ...utils.imports import _is_package_available +from . import cursor, input +from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor +from .keymap import KEYMAP + + +in_colab = False +try: + in_colab = _is_package_available("google.colab") +except ModuleNotFoundError: + pass + + +@input.register +class BulletMenu: + """ + A CLI menu to select a choice from a list of choices using the keyboard. + """ + + def __init__(self, prompt: str = None, choices: list = []): + self.position = 0 + self.choices = choices + self.prompt = prompt + if sys.platform == "win32": + self.arrow_char = "*" + else: + self.arrow_char = "➔ " + + def write_choice(self, index, end: str = ""): + if sys.platform != "win32": + writeColor(self.choices[index], 32, end) + else: + forceWrite(self.choices[index], end) + + def print_choice(self, index: int): + "Prints the choice at the given index" + if index == self.position: + forceWrite(f" {self.arrow_char} ") + self.write_choice(index) + else: + forceWrite(f" {self.choices[index]}") + reset_cursor() + + def move_direction(self, direction: Direction, num_spaces: int = 1): + "Should not be directly called, used to move a direction of either up or down" + old_position = self.position + if direction == Direction.DOWN: + if self.position + 1 >= len(self.choices): + return + self.position += num_spaces + else: + if self.position - 1 < 0: + return + self.position -= num_spaces + clear_line() + self.print_choice(old_position) + move_cursor(num_spaces, direction.name) + self.print_choice(self.position) + + @input.mark(KEYMAP["up"]) + def move_up(self): + self.move_direction(Direction.UP) + + @input.mark(KEYMAP["down"]) + def move_down(self): + self.move_direction(Direction.DOWN) + + @input.mark(KEYMAP["newline"]) + def select(self): + move_cursor(len(self.choices) - self.position, "DOWN") + return self.position + + @input.mark(KEYMAP["interrupt"]) + def interrupt(self): + move_cursor(len(self.choices) - self.position, "DOWN") + raise KeyboardInterrupt + + @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)]) + def select_row(self): + index = int(chr(self.current_selection)) + movement = index - self.position + if index == self.position: + return + if index < len(self.choices): + if self.position > index: + self.move_direction(Direction.UP, -movement) + elif self.position < index: + self.move_direction(Direction.DOWN, movement) + else: + return + else: + return + + def run(self, default_choice: int = 0): + "Start the menu and return the selected choice" + if self.prompt: + linebreak() + forceWrite(self.prompt, "\n") + if in_colab: + forceWrite("Please input a choice index (starting from 0), and press enter", "\n") + else: + forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n") + self.position = default_choice + for i in range(len(self.choices)): + self.print_choice(i) + forceWrite("\n") + move_cursor(len(self.choices) - self.position, "UP") 
+ with cursor.hide(): + while True: + if in_colab: + try: + choice = int(builtins.input()) + except ValueError: + choice = default_choice + else: + choice = self.handle_input() + if choice is not None: + reset_cursor() + for _ in range(len(self.choices) + 1): + move_cursor(1, "UP") + clear_line() + self.write_choice(choice, "\n") + return choice diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/test.py b/venv/lib/python3.10/site-packages/accelerate/commands/test.py new file mode 100644 index 0000000000000000000000000000000000000000..a0d2f7bcf14727aa13e3438f4cd6e6f140f5bb2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/test.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package + + +def test_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("test") + else: + parser = argparse.ArgumentParser("Accelerate test command") + + parser.add_argument( + "--config_file", + default=None, + help=( + "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + ) + + if subparsers is not None: + parser.set_defaults(func=test_command) + return parser + + +def test_command(args): + script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py") + + if args.config_file is None: + test_args = [script_name] + else: + test_args = f"--config_file={args.config_file} {script_name}".split() + + cmd = ["accelerate-launch"] + test_args + result = execute_subprocess_async(cmd) + if result.returncode == 0: + print("Test is a success! You are ready for your distributed training!") + + +def main(): + parser = test_command_parser() + args = parser.parse_args() + test_command(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/tpu.py b/venv/lib/python3.10/site-packages/accelerate/commands/tpu.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0f07bf8697bfdb6484d3bf817f2e18b1313b00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/tpu.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import subprocess + +from packaging.version import Version, parse + +from accelerate.commands.config.config_args import default_config_file, load_config_from_file + + +_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`." + + +def tpu_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("tpu-config", description=_description) + else: + parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description) + # Core arguments + config_args = parser.add_argument_group( + "Config Arguments", "Arguments that can be configured through `accelerate config`." + ) + config_args.add_argument( + "--config_file", + type=str, + default=None, + help="Path to the config file to use for accelerate.", + ) + config_args.add_argument( + "--tpu_name", + default=None, + help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", + ) + config_args.add_argument( + "--tpu_zone", + default=None, + help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", + ) + pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.") + pod_args.add_argument( + "--use_alpha", + action="store_true", + help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", + ) + pod_args.add_argument( + "--command_file", + default=None, + help="The path to the file containing the commands to run on the pod on startup.", + ) + pod_args.add_argument( + "--command", + action="append", + nargs="+", + help="A command to run on the pod. Can be passed multiple times.", + ) + pod_args.add_argument( + "--install_accelerate", + action="store_true", + help="Whether to install accelerate on the pod. Defaults to False.", + ) + pod_args.add_argument( + "--accelerate_version", + default="latest", + help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", + ) + pod_args.add_argument( + "--debug", action="store_true", help="If set, will print the command that would be run instead of running it." + ) + + if subparsers is not None: + parser.set_defaults(func=tpu_command_launcher) + return parser + + +def tpu_command_launcher(args): + defaults = None + + # Get the default from the config file if it exists. 
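+ # Note: the rest of this function folds config-file defaults into the CLI args, joins the
+ # startup commands into a single "; "-separated string prefixed with `cd /usr/share` (plus an
+ # optional `pip install`), and hands it to
+ # `gcloud [alpha] compute tpus tpu-vm ssh <tpu_name> --zone <tpu_zone> --command ... --worker all`;
+ # with `--debug` the assembled command is only printed, not executed.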
+ if args.config_file is not None or os.path.isfile(default_config_file): + defaults = load_config_from_file(args.config_file) + if not args.command_file and defaults.command_file is not None and not args.command: + args.command_file = defaults.command_file + if not args.command and defaults.commands is not None: + args.command = defaults.commands + if not args.tpu_name: + args.tpu_name = defaults.tpu_name + if not args.tpu_zone: + args.tpu_zone = defaults.tpu_zone + if args.accelerate_version == "dev": + args.accelerate_version = "git+https://github.com/huggingface/accelerate.git" + elif args.accelerate_version == "latest": + args.accelerate_version = "accelerate -U" + elif isinstance(parse(args.accelerate_version), Version): + args.accelerate_version = f"accelerate=={args.accelerate_version}" + + if not args.command_file and not args.command: + raise ValueError("You must specify either a command file or a command to run on the pod.") + + if args.command_file: + with open(args.command_file) as f: + args.command = [f.read().splitlines()] + + # To turn list of lists into list of strings + if isinstance(args.command[0], list): + args.command = [line for cmd in args.command for line in cmd] + # Default to the shared folder and install accelerate + new_cmd = ["cd /usr/share"] + if args.install_accelerate: + new_cmd += [f"pip install {args.accelerate_version}"] + new_cmd += args.command + args.command = "; ".join(new_cmd) + + # Then send it to gcloud + # Eventually try to use google-api-core to do this instead of subprocess + cmd = ["gcloud"] + if args.use_alpha: + cmd += ["alpha"] + cmd += [ + "compute", + "tpus", + "tpu-vm", + "ssh", + args.tpu_name, + "--zone", + args.tpu_zone, + "--command", + args.command, + "--worker", + "all", + ] + if args.debug: + print(f"Running {' '.join(cmd)}") + return + subprocess.run(cmd) + print("Successfully setup pod.") + + +def main(): + parser = tpu_command_parser() + args = parser.parse_args() + + tpu_command_launcher(args) diff --git a/venv/lib/python3.10/site-packages/accelerate/commands/utils.py b/venv/lib/python3.10/site-packages/accelerate/commands/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b65215fac7666b475af98b17e264ef6701239bc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/commands/utils.py @@ -0,0 +1,120 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + + +class _StoreAction(argparse.Action): + """ + Custom action that allows for `-` or `_` to be passed in for an argument. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + new_option_strings = [] + for option_string in self.option_strings: + new_option_strings.append(option_string) + if "_" in option_string[2:]: + # Add `-` version to the option string + new_option_strings.append(option_string.replace("_", "-")) + self.option_strings = new_option_strings + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + + +class _StoreConstAction(_StoreAction): + """ + Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`. + """ + + def __init__(self, option_strings, dest, const, default=None, required=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + +class _StoreTrueAction(_StoreConstAction): + """ + Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`. + """ + + def __init__( + self, + option_strings, + dest, + default=None, + required=False, + help=None, + ): + super().__init__( + option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help + ) + + +class CustomArgumentGroup(argparse._ArgumentGroup): + """ + Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each + when applicable. + """ + + def _add_action(self, action): + args = vars(action) + if isinstance(action, argparse._StoreTrueAction): + action = _StoreTrueAction( + args["option_strings"], args["dest"], args["default"], args["required"], args["help"] + ) + elif isinstance(action, argparse._StoreConstAction): + action = _StoreConstAction( + args["option_strings"], + args["dest"], + args["const"], + args["default"], + args["required"], + args["help"], + ) + elif isinstance(action, argparse._StoreAction): + action = _StoreAction(**args) + action = super()._add_action(action) + return action + + +class CustomArgumentParser(argparse.ArgumentParser): + """ + Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each + when applicable. + """ + + def add_argument(self, *args, **kwargs): + if "action" in kwargs: + # Translate action -> class + if kwargs["action"] == "store_true": + kwargs["action"] = _StoreTrueAction + else: + kwargs["action"] = _StoreAction + super().add_argument(*args, **kwargs) + + def add_argument_group(self, *args, **kwargs): + group = CustomArgumentGroup(self, *args, **kwargs) + self._action_groups.append(group) + return group diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__init__.py b/venv/lib/python3.10/site-packages/accelerate/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..50baa32fdbca940857055445c1988c16b9f01b6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/__init__.py @@ -0,0 +1,225 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .constants import ( + MODEL_NAME, + OPTIMIZER_NAME, + RNG_STATE_NAME, + SAFE_MODEL_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + SAMPLER_NAME, + SCALER_NAME, + SCHEDULER_NAME, + TORCH_DISTRIBUTED_OPERATION_TYPES, + TORCH_LAUNCH_PARAMS, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, +) +from .dataclasses import ( + AutocastKwargs, + BnbQuantizationConfig, + ComputeEnvironment, + CustomDtype, + DataLoaderConfiguration, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + DynamoBackend, + FP8RecipeKwargs, + FullyShardedDataParallelPlugin, + GradientAccumulationPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + KwargsHandler, + LoggerType, + MegatronLMPlugin, + PrecisionType, + ProjectConfiguration, + RNGType, + SageMakerDistributedType, + TensorInformation, + TorchDynamoPlugin, +) +from .environment import ( + are_libraries_initialized, + check_cuda_p2p_ib_support, + check_fp8_capability, + convert_dict_to_env_variables, + get_cpu_distributed_information, + get_gpu_info, + get_int_from_env, + parse_choice_from_env, + parse_flag_from_env, + set_numa_affinity, + str_to_bool, +) +from .imports import ( + get_ccl_version, + is_4bit_bnb_available, + is_8bit_bnb_available, + is_aim_available, + is_bf16_available, + is_bnb_available, + is_boto3_available, + is_ccl_available, + is_clearml_available, + is_comet_ml_available, + is_cuda_available, + is_datasets_available, + is_deepspeed_available, + is_dvclive_available, + is_fp8_available, + is_ipex_available, + is_megatron_lm_available, + is_mlflow_available, + is_mlu_available, + is_mps_available, + is_msamp_available, + is_npu_available, + is_pandas_available, + is_peft_available, + is_pippy_available, + is_pynvml_available, + is_rich_available, + is_sagemaker_available, + is_tensorboard_available, + is_timm_available, + is_torch_xla_available, + is_transformer_engine_available, + is_transformers_available, + is_wandb_available, + is_xpu_available, +) +from .modeling import ( + calculate_maximum_sizes, + check_device_map, + check_tied_parameters_in_config, + check_tied_parameters_on_same_device, + compute_module_sizes, + convert_file_size_to_int, + dtype_byte_size, + find_tied_parameters, + get_balanced_memory, + get_max_layer_size, + get_max_memory, + get_mixed_precision_context_manager, + id_tensor_storage, + infer_auto_device_map, + is_peft_model, + load_checkpoint_in_model, + load_offloaded_weights, + load_state_dict, + named_module_tensors, + retie_parameters, + set_module_tensor_to_device, + shard_checkpoint, +) +from .offload import ( + OffloadedWeightsLoader, + PrefixedDataset, + extract_submodules_state_dict, + load_offloaded_weight, + offload_state_dict, + offload_weight, + save_offload_index, +) +from .operations import ( + CannotPadNestedTensorWarning, + broadcast, + broadcast_object_list, + concatenate, + convert_outputs_to_fp32, + convert_to_fp32, + copy_tensor_to_devices, + find_batch_size, + find_device, + gather, + gather_object, + get_data_structure, + honor_type, + ignorant_find_batch_size, + initialize_tensors, + is_namedtuple, + is_tensor_information, + is_torch_tensor, + listify, + 
pad_across_processes, + pad_input_tensors, + recursively_apply, + reduce, + send_to_device, + slice_tensors, +) +from .versions import compare_versions, is_torch_version + + +if is_deepspeed_available(): + from .deepspeed import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + DummyOptim, + DummyScheduler, + HfDeepSpeedConfig, + ) + +from .bnb import has_4bit_bnb_layers, load_and_quantize_model +from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer +from .launch import ( + PrepareForLaunch, + _filter_args, + prepare_deepspeed_cmd_env, + prepare_multi_gpu_env, + prepare_sagemager_args_inputs, + prepare_simple_launcher_cmd_env, + prepare_tpu, +) +from .megatron_lm import ( + AbstractTrainStep, + BertTrainStep, + GPTTrainStep, + MegatronEngine, + MegatronLMDummyDataLoader, + MegatronLMDummyScheduler, + MegatronLMOptimizerWrapper, + MegatronLMSchedulerWrapper, + T5TrainStep, + avg_losses_across_data_parallel_group, + gather_across_data_parallel_groups, +) +from .megatron_lm import initialize as megatron_lm_initialize +from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader +from .megatron_lm import prepare_model as megatron_lm_prepare_model +from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer +from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler +from .memory import find_executable_batch_size, release_memory +from .other import ( + check_os_kernel, + clean_state_dict_for_safetensors, + clear_environment, + convert_bytes, + extract_model_from_parallel, + get_pretty_name, + is_port_in_use, + merge_dicts, + patch_environment, + recursive_getattr, + save, + wait_for_everyone, + write_basic_config, +) +from .random import set_seed, synchronize_rng_state, synchronize_rng_states +from .torch_xla import install_xla +from .tqdm import tqdm +from .transformer_engine import convert_model, has_transformer_engine_layers diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8f34a53a8f6f78f954c390a95e0ae23231453b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23e5c5e5c9ef9fbd5bb63b13aabfcdfdd7363480 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4289431cf8ac7d9ca9cde7f6e9deabc1f6e75a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..276799c75c7b3b5b7e9335bf91029b7216405967 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82dee0d5ba214b7163e011e63f8b5ee9681387ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cce1d25a5900d61fc7f31a86f870ab6aa528137 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02d205791876ffa8842d9fd36009574bf12a66aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..772f347ed0d54a257ce2b6b442197fda8e22d6b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ee9b2281a6c738506cc091132db5a787c064b58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67481422e00dfff21432abd05ec36e99d6e2f571 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9adc5c85e9372297b8e9a979911e95a5fc5cd4a Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79ee2abf3ab7a497aedff67ac3d2d1c90f2c05a2 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f261f893cb9b6ca68ba7832b185eab55a3d55b2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c02a38ac888988561693b437f00c1c61478861e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cecf45cb174853ae3045993eccc85563cffe087 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96e51c6453a70e3b7ad91c3ccfd03a12f2b16837 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a58c29e3209a55e0a07e54e734c64845d35ea20e Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea316b9c0448d26aa6c4bcb5b961b3e6736b5a33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97ede214e20b4ba68ea5f58a8e724957ad7e2aba Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f37e04da162e50937fea669ec5a14a0cabaa6f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd1fc9908d97e2cab781479212603101102db1d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/constants.py b/venv/lib/python3.10/site-packages/accelerate/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..8c299570757cb6a5df93f4794e403d1581dd7c2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/constants.py @@ -0,0 +1,72 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import operator as op + + +SCALER_NAME = "scaler.pt" +MODEL_NAME = "pytorch_model" +SAFE_MODEL_NAME = "model" +RNG_STATE_NAME = "random_states" +OPTIMIZER_NAME = "optimizer" +SCHEDULER_NAME = "scheduler" +SAMPLER_NAME = "sampler" +WEIGHTS_NAME = f"{MODEL_NAME}.bin" +WEIGHTS_INDEX_NAME = f"{WEIGHTS_NAME}.index.json" +SAFE_WEIGHTS_NAME = f"{SAFE_MODEL_NAME}.safetensors" +SAFE_WEIGHTS_INDEX_NAME = f"{SAFE_WEIGHTS_NAME}.index.json" +SAGEMAKER_PYTORCH_VERSION = "1.10.2" +SAGEMAKER_PYTHON_VERSION = "py38" +SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0" +SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"] +FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"] +FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"] +FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"] +FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"] +FSDP_PYTORCH_VERSION = "2.1.0" +FSDP_MODEL_NAME = "pytorch_model_fsdp" +DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"] +TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"] + +STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} + +# These are the args for `torch.distributed.launch` for pytorch < 1.9 +TORCH_LAUNCH_PARAMS = [ + "nnodes", + "nproc_per_node", + "rdzv_backend", + "rdzv_endpoint", + "rdzv_id", + "rdzv_conf", + "standalone", + "max_restarts", + "monitor_interval", + "start_method", + "role", + "module", + "m", + "no_python", + "run_path", + "log_dir", + "r", + "redirects", + "t", + "tee", + "node_rank", + "master_addr", + "master_port", +] + +CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"] +TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + ["MULTI_NPU", "MULTI_MLU", "MULTI_XPU", "MULTI_CPU"] diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/deepspeed.py b/venv/lib/python3.10/site-packages/accelerate/utils/deepspeed.py new file mode 100644 index 
0000000000000000000000000000000000000000..fe5a63fc7314d42f68baae41cf56f9abc94237a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/deepspeed.py @@ -0,0 +1,271 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import json +import os +from copy import deepcopy + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler + + +class HfDeepSpeedConfig: + """ + This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. + + A `weakref` of this object is stored in the module's globals to be able to access the config from areas where + things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore + it's important that this object remains alive while the program is still running. + + [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration + with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic + the DeepSpeed configuration is not modified in any way. + + Args: + config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. + + """ + + def __init__(self, config_file_or_dict): + if isinstance(config_file_or_dict, dict): + # Don't modify user's data should they want to reuse it (e.g. in tests), because once we + # modified it, it will not be accepted here again, since `auto` values would have been overridden + config = deepcopy(config_file_or_dict) + elif os.path.exists(config_file_or_dict): + with open(config_file_or_dict, encoding="utf-8") as f: + config = json.load(f) + else: + try: + config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8") + config = json.loads(config_decoded) + except (UnicodeDecodeError, AttributeError, ValueError): + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" + ) + + self.config = config + + self.set_stage_and_offload() + + def set_stage_and_offload(self): + # zero stage - this is done as early as possible, before model is created, to allow + # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object + # during ``zero.Init()`` which needs to know the dtype, and some other hparams. 
+ self._stage = self.get_value("zero_optimization.stage", -1) + + # offload + self._offload = False + if self.is_zero2() or self.is_zero3(): + offload_devices_valid = set(["cpu", "nvme"]) + offload_devices = set( + [ + self.get_value("zero_optimization.offload_optimizer.device"), + self.get_value("zero_optimization.offload_param.device"), + ] + ) + if len(offload_devices & offload_devices_valid) > 0: + self._offload = True + + def find_config_node(self, ds_key_long): + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + ds_key = nodes.pop() + for node in nodes: + config = config.get(node) + if config is None: + return None, ds_key + + return config, ds_key + + def get_value(self, ds_key_long, default=None): + """ + Returns the set value or `default` if no value is set + """ + config, ds_key = self.find_config_node(ds_key_long) + if config is None: + return default + return config.get(ds_key, default) + + def del_config_sub_tree(self, ds_key_long, must_exist=False): + """ + Deletes a sub-section of the config file if it's found. + + Unless `must_exist` is `True` the section doesn't have to exist. + """ + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + for node in nodes: + parent_config = config + config = config.get(node) + if config is None: + if must_exist: + raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") + else: + return + + # if found remove it + if parent_config is not None: + parent_config.pop(node) + + def is_true(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set). + + """ + value = self.get_value(ds_key_long) + return False if value is None else bool(value) + + def is_false(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). + """ + value = self.get_value(ds_key_long) + return False if value is None else not bool(value) + + def is_zero2(self): + return self._stage == 2 + + def is_zero3(self): + return self._stage == 3 + + def is_offload(self): + return self._offload + + +class DeepSpeedEngineWrapper: + """ + Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop. + + Args: + engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap + """ + + def __init__(self, engine): + self.engine = engine + + def backward(self, loss, **kwargs): + # runs backpropagation and handles mixed precision + self.engine.backward(loss, **kwargs) + + # Deepspeed's `engine.step` performs the following operations: + # - gradient accumulation check + # - gradient clipping + # - optimizer step + # - zero grad + # - checking overflow + # - lr_scheduler step (only if engine.lr_scheduler is not None) + self.engine.step() + # and this plugin overrides the above calls with no-ops when Accelerate runs under + # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple + # training loop that works transparently under many training regimes. + + +class DeepSpeedOptimizerWrapper(AcceleratedOptimizer): + """ + Internal wrapper around a deepspeed optimizer. 
+ + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + """ + + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + self.__has_overflow__ = hasattr(self.optimizer, "overflow") + + def zero_grad(self, set_to_none=None): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + if self.__has_overflow__: + return self.optimizer.overflow + return False + + +class DeepSpeedSchedulerWrapper(AcceleratedScheduler): + """ + Internal wrapper around a deepspeed scheduler. + + Args: + scheduler (`torch.optim.lr_scheduler.LambdaLR`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + """ + + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + +class DummyOptim: + """ + Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training + loop when optimizer config is specified in the deepspeed config file. + + Args: + lr (float): + Learning rate. + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + weight_decay (float): + Weight decay. + **kwargs (additional keyword arguments, *optional*): + Other arguments. + """ + + def __init__(self, params, lr=0.001, weight_decay=0, **kwargs): + self.params = params + self.lr = lr + self.weight_decay = weight_decay + self.kwargs = kwargs + + +class DummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int, *optional*): + Total number of steps. + warmup_num_steps (int, *optional*): + Number of steps for warmup. + lr_scheduler_callable (callable, *optional*): + A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`. + **kwargs (additional keyword arguments, *optional*): + Other arguments. + """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.lr_scheduler_callable = lr_scheduler_callable + self.kwargs = kwargs diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py b/venv/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..01bb54b262b7f00b4bfb0933fc5fe94b24146097 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py @@ -0,0 +1,209 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +import torch + +from ..logging import get_logger +from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME +from .imports import is_torch_distributed_available +from .modeling import is_peft_model +from .versions import is_torch_version + + +if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available(): + import torch.distributed.checkpoint as dist_cp + from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner + from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType + + +logger = get_logger(__name__) + + +def _get_model_state_dict(model, adapter_only=False): + if adapter_only and is_peft_model(model): + from peft import get_peft_model_state_dict + + return get_peft_model_state_dict(model, adapter_name=model.active_adapter) + else: + return model.state_dict() + + +def _set_model_state_dict(model, state_dict, adapter_only=False): + if adapter_only and is_peft_model(model): + from peft import set_peft_model_state_dict + + return set_peft_model_state_dict(model, state_dict, adapter_name=model.active_adapter) + else: + return model.load_state_dict(state_dict) + + +def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False): + os.makedirs(output_dir, exist_ok=True) + + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT + # so, only enable it when num_processes>1 + is_multi_process = accelerator.num_processes > 1 + fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process + fsdp_plugin.state_dict_config.rank0_only = is_multi_process + + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + state_dict = _get_model_state_dict(model, adapter_only=adapter_only) + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" + output_model_file = os.path.join(output_dir, weights_name) + if accelerator.process_index == 0: + logger.info(f"Saving model to {output_model_file}") + torch.save(state_dict, output_model_file) + logger.info(f"Model saved to {output_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: + weights_name = ( + f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" + if model_index == 0 + else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" + ) + output_model_file = os.path.join(output_dir, weights_name) + logger.info(f"Saving model to {output_model_file}") + torch.save(state_dict, output_model_file) + logger.info(f"Model saved to {output_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: + ckpt_dir = os.path.join(output_dir, 
f"{FSDP_MODEL_NAME}_{model_index}") + os.makedirs(ckpt_dir, exist_ok=True) + logger.info(f"Saving model to {ckpt_dir}") + state_dict = {"model": state_dict} + + dist_cp.save_state_dict( + state_dict=state_dict, + storage_writer=dist_cp.FileSystemWriter(ckpt_dir), + planner=DefaultSavePlanner(), + ) + logger.info(f"Model saved to {ckpt_dir}") + + +def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, adapter_only=False): + accelerator.wait_for_everyone() + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT + # so, only enable it when num_processes>1 + is_multi_process = accelerator.num_processes > 1 + fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process + fsdp_plugin.state_dict_config.rank0_only = is_multi_process + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + if type(model) != FSDP and accelerator.process_index != 0: + if not fsdp_plugin.sync_module_states: + raise ValueError( + "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " + "initializing FSDP object" + ) + return + weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" + input_model_file = os.path.join(input_dir, weights_name) + logger.info(f"Loading model from {input_model_file}") + state_dict = torch.load(input_model_file) + logger.info(f"Model loaded from {input_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: + weights_name = ( + f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" + if model_index == 0 + else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" + ) + input_model_file = os.path.join(input_dir, weights_name) + logger.info(f"Loading model from {input_model_file}") + state_dict = torch.load(input_model_file) + logger.info(f"Model loaded from {input_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: + ckpt_dir = ( + os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}") + if f"{FSDP_MODEL_NAME}" not in input_dir + else input_dir + ) + logger.info(f"Loading model from {ckpt_dir}") + state_dict = {"model": _get_model_state_dict(model, adapter_only=adapter_only)} + dist_cp.load_state_dict( + state_dict=state_dict, + storage_reader=dist_cp.FileSystemReader(ckpt_dir), + planner=DefaultLoadPlanner(), + ) + state_dict = state_dict["model"] + logger.info(f"Model loaded from {ckpt_dir}") + load_result = _set_model_state_dict(model, state_dict, adapter_only=adapter_only) + return load_result + + +def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0): + os.makedirs(output_dir, exist_ok=True) + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + optim_state = FSDP.optim_state_dict(model, optimizer) + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + if accelerator.process_index == 0: + optim_state_name = ( + f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" + ) + output_optimizer_file = os.path.join(output_dir, optim_state_name) + logger.info(f"Saving Optimizer state to {output_optimizer_file}") + torch.save(optim_state, output_optimizer_file) + 
logger.info(f"Optimizer state saved in {output_optimizer_file}") + else: + ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") + os.makedirs(ckpt_dir, exist_ok=True) + logger.info(f"Saving Optimizer state to {ckpt_dir}") + dist_cp.save_state_dict( + state_dict={"optimizer": optim_state}, + storage_writer=dist_cp.FileSystemWriter(ckpt_dir), + planner=DefaultSavePlanner(), + ) + logger.info(f"Optimizer state saved in {ckpt_dir}") + + +def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0, adapter_only=False): + accelerator.wait_for_everyone() + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + optim_state = None + if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: + optimizer_name = ( + f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" + ) + input_optimizer_file = os.path.join(input_dir, optimizer_name) + logger.info(f"Loading Optimizer state from {input_optimizer_file}") + optim_state = torch.load(input_optimizer_file) + logger.info(f"Optimizer state loaded from {input_optimizer_file}") + else: + ckpt_dir = ( + os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") + if f"{OPTIMIZER_NAME}" not in input_dir + else input_dir + ) + logger.info(f"Loading Optimizer from {ckpt_dir}") + optim_state = load_sharded_optimizer_state_dict( + model_state_dict=_get_model_state_dict(model, adapter_only=adapter_only), + optimizer_key="optimizer", + storage_reader=dist_cp.FileSystemReader(ckpt_dir), + ) + optim_state = optim_state["optimizer"] + logger.info(f"Optimizer loaded from {ckpt_dir}") + flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state) + optimizer.load_state_dict(flattened_osd) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/imports.py b/venv/lib/python3.10/site-packages/accelerate/utils/imports.py new file mode 100644 index 0000000000000000000000000000000000000000..1ef57c05d46cd2db4a21854f477c47048f71c2e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/imports.py @@ -0,0 +1,385 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import importlib.metadata +import os +import warnings +from functools import lru_cache + +import torch +from packaging import version +from packaging.version import parse + +from .environment import parse_flag_from_env, str_to_bool +from .versions import compare_versions, is_torch_version + + +# Try to run Torch native job in an environment with TorchXLA installed by setting this value to 0. 
+USE_TORCH_XLA = parse_flag_from_env("USE_TORCH_XLA", default=True) + +_torch_xla_available = False +if USE_TORCH_XLA: + try: + import torch_xla.core.xla_model as xm # noqa: F401 + import torch_xla.runtime + + _torch_xla_available = True + except ImportError: + pass + +# Keep it for is_tpu_available. It will be removed along with is_tpu_available. +_tpu_available = _torch_xla_available + +# Cache this result has it's a C FFI call which can be pretty time-consuming +_torch_distributed_available = torch.distributed.is_available() + + +def _is_package_available(pkg_name, metadata_name=None): + # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version + package_exists = importlib.util.find_spec(pkg_name) is not None + if package_exists: + try: + # Some libraries have different names in the metadata + _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) + return True + except importlib.metadata.PackageNotFoundError: + return False + + +def is_torch_distributed_available() -> bool: + return _torch_distributed_available + + +def is_ccl_available(): + try: + pass + except ImportError: + print( + "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not" + " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL" + " Bindings for PyTorch*." + ) + return ( + importlib.util.find_spec("torch_ccl") is not None + or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None + ) + + +def get_ccl_version(): + return importlib.metadata.version("oneccl_bind_pt") + + +def is_pynvml_available(): + return _is_package_available("pynvml") + + +def is_msamp_available(): + return _is_package_available("msamp", "ms-amp") + + +def is_transformer_engine_available(): + return _is_package_available("transformer_engine") + + +def is_fp8_available(): + return is_msamp_available() or is_transformer_engine_available() + + +def is_cuda_available(): + """ + Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda + uninitialized. + """ + pytorch_nvml_based_cuda_check_previous_value = os.environ.get("PYTORCH_NVML_BASED_CUDA_CHECK") + try: + os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1) + available = torch.cuda.is_available() + finally: + if pytorch_nvml_based_cuda_check_previous_value: + os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = pytorch_nvml_based_cuda_check_previous_value + else: + os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None) + + return available + + +@lru_cache +def is_tpu_available(check_device=True): + "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" + warnings.warn( + "`is_tpu_available` is deprecated and will be removed in v0.27.0. " + "Please use the `is_torch_xla_available` instead.", + FutureWarning, + ) + # Due to bugs on the amp series GPUs, we disable torch-xla on them + if is_cuda_available(): + return False + if check_device: + if _tpu_available: + try: + # Will raise a RuntimeError if no XLA configuration is found + _ = xm.xla_device() + return True + except RuntimeError: + return False + return _tpu_available + + +@lru_cache +def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False): + """ + Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set + the USE_TORCH_XLA to false. 
+ """ + assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." + + if not _torch_xla_available: + return False + elif check_is_gpu: + return torch_xla.runtime.device_type() in ["GPU", "CUDA"] + elif check_is_tpu: + return torch_xla.runtime.device_type() == "TPU" + + return True + + +def is_deepspeed_available(): + if is_mlu_available(): + return _is_package_available("deepspeed", metadata_name="deepspeed-mlu") + return _is_package_available("deepspeed") + + +def is_pippy_available(): + package_exists = _is_package_available("pippy", "torchpippy") + if package_exists: + pippy_version = version.parse(importlib.metadata.version("torchpippy")) + return compare_versions(pippy_version, ">", "0.1.1") + return False + + +def is_bf16_available(ignore_tpu=False): + "Checks if bf16 is supported, optionally ignoring the TPU" + if is_torch_xla_available(check_is_tpu=True): + return not ignore_tpu + if is_cuda_available(): + return torch.cuda.is_bf16_supported() + return True + + +def is_4bit_bnb_available(): + package_exists = _is_package_available("bitsandbytes") + if package_exists: + bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) + return compare_versions(bnb_version, ">=", "0.39.0") + return False + + +def is_8bit_bnb_available(): + package_exists = _is_package_available("bitsandbytes") + if package_exists: + bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) + return compare_versions(bnb_version, ">=", "0.37.2") + return False + + +def is_bnb_available(): + return _is_package_available("bitsandbytes") + + +def is_megatron_lm_available(): + if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1: + package_exists = importlib.util.find_spec("megatron") is not None + if package_exists: + try: + megatron_version = parse(importlib.metadata.version("megatron-lm")) + return compare_versions(megatron_version, ">=", "2.2.0") + except Exception as e: + warnings.warn(f"Parse Megatron version failed. Exception:{e}") + return False + + +def is_transformers_available(): + return _is_package_available("transformers") + + +def is_datasets_available(): + return _is_package_available("datasets") + + +def is_peft_available(): + return _is_package_available("peft") + + +def is_timm_available(): + return _is_package_available("timm") + + +def is_aim_available(): + package_exists = _is_package_available("aim") + if package_exists: + aim_version = version.parse(importlib.metadata.version("aim")) + return compare_versions(aim_version, "<", "4.0.0") + return False + + +def is_tensorboard_available(): + return _is_package_available("tensorboard") or _is_package_available("tensorboardX") + + +def is_wandb_available(): + return _is_package_available("wandb") + + +def is_comet_ml_available(): + return _is_package_available("comet_ml") + + +def is_boto3_available(): + return _is_package_available("boto3") + + +def is_rich_available(): + if _is_package_available("rich"): + if "ACCELERATE_DISABLE_RICH" in os.environ: + warnings.warn( + "`ACCELERATE_DISABLE_RICH` is deprecated and will be removed in v0.22.0 and deactivated by default. Please use `ACCELERATE_ENABLE_RICH` if you wish to use `rich`." 
+ ) + return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False) + return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False) + return False + + +def is_sagemaker_available(): + return _is_package_available("sagemaker") + + +def is_tqdm_available(): + return _is_package_available("tqdm") + + +def is_clearml_available(): + return _is_package_available("clearml") + + +def is_pandas_available(): + return _is_package_available("pandas") + + +def is_mlflow_available(): + if _is_package_available("mlflow"): + return True + + if importlib.util.find_spec("mlflow") is not None: + try: + _ = importlib.metadata.metadata("mlflow-skinny") + return True + except importlib.metadata.PackageNotFoundError: + return False + return False + + +def is_mps_available(): + return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built() + + +def is_ipex_available(): + def get_major_and_minor_from_version(full_version): + return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor) + + _torch_version = importlib.metadata.version("torch") + if importlib.util.find_spec("intel_extension_for_pytorch") is None: + return False + _ipex_version = "N/A" + try: + _ipex_version = importlib.metadata.version("intel_extension_for_pytorch") + except importlib.metadata.PackageNotFoundError: + return False + torch_major_and_minor = get_major_and_minor_from_version(_torch_version) + ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version) + if torch_major_and_minor != ipex_major_and_minor: + warnings.warn( + f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*," + f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again." 
+ ) + return False + return True + + +@lru_cache +def is_mlu_available(check_device=False): + "Checks if `torch_mlu` is installed and potentially if a MLU is in the environment" + if importlib.util.find_spec("torch_mlu") is None: + return False + + import torch + import torch_mlu # noqa: F401 + + if check_device: + try: + # Will raise a RuntimeError if no MLU is found + _ = torch.mlu.device_count() + return torch.mlu.is_available() + except RuntimeError: + return False + return hasattr(torch, "mlu") and torch.mlu.is_available() + + +@lru_cache +def is_npu_available(check_device=False): + "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" + if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None: + return False + + import torch + import torch_npu # noqa: F401 + + if check_device: + try: + # Will raise a RuntimeError if no NPU is found + _ = torch.npu.device_count() + return torch.npu.is_available() + except RuntimeError: + return False + return hasattr(torch, "npu") and torch.npu.is_available() + + +@lru_cache +def is_xpu_available(check_device=False): + "check if user disables it explicitly" + if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True): + return False + "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" + if is_ipex_available(): + import torch + + if is_torch_version("<=", "1.12"): + return False + else: + return False + + import intel_extension_for_pytorch # noqa: F401 + + if check_device: + try: + # Will raise a RuntimeError if no XPU is found + _ = torch.xpu.device_count() + return torch.xpu.is_available() + except RuntimeError: + return False + return hasattr(torch, "xpu") and torch.xpu.is_available() + + +def is_dvclive_available(): + return _is_package_available("dvclive") diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/launch.py b/venv/lib/python3.10/site-packages/accelerate/utils/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..dc074270bf8f8f66f9a963c6990de0dd05e766ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/launch.py @@ -0,0 +1,624 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
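# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the vendored diff.
# The capability probes defined in utils/imports.py above (is_cuda_available,
# is_torch_xla_available, is_npu_available, is_mlu_available, is_xpu_available)
# return plain booleans, so calling code can pick a device family without
# touching the torch backends directly. A minimal sketch, assuming the
# `accelerate` package added by this diff is importable:
from accelerate.utils.imports import (
    is_cuda_available,
    is_mlu_available,
    is_npu_available,
    is_torch_xla_available,
    is_xpu_available,
)

if is_torch_xla_available(check_is_tpu=True):
    device = "xla"    # TPU through torch_xla
elif is_cuda_available():
    device = "cuda"   # NVML-based check that leaves CUDA uninitialized
elif is_npu_available():
    device = "npu"
elif is_mlu_available():
    device = "mlu"
elif is_xpu_available():
    device = "xpu"
else:
    device = "cpu"
# ---------------------------------------------------------------------------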
+ +import argparse +import os +import subprocess +import sys +import warnings +from ast import literal_eval +from shutil import which +from typing import Any, Dict, List, Tuple + +import torch + +from ..commands.config.config_args import SageMakerConfig +from ..utils import ( + DynamoBackend, + PrecisionType, + is_ipex_available, + is_mlu_available, + is_npu_available, + is_torch_xla_available, + is_xpu_available, +) +from ..utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS +from ..utils.other import is_port_in_use, merge_dicts +from .dataclasses import DistributedType, SageMakerDistributedType + + +def _filter_args(args, parser, default_args=[]): + """ + Filters out all `accelerate` specific args + """ + new_args, _ = parser.parse_known_args(default_args) + for key, value in vars(args).items(): + if key in vars(new_args).keys(): + setattr(new_args, key, value) + return new_args + + +def _get_mpirun_args(): + """ + Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs + are: OpenMPI, Intel MPI, or MVAPICH. + + Returns: Program name and arg names for hostfile, num processes, and processes per node + """ + # Find the MPI program name + mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)] + + if len(mpi_apps) == 0: + raise OSError("mpirun or mpiexec were not found. Ensure that Intel MPI, Open MPI, or MVAPICH are installed.") + + # Call the app with the --version flag to determine which MPI app is installed + mpi_app = mpi_apps[0] + mpirun_version = subprocess.check_output([mpi_app, "--version"]) + + if b"Open MPI" in mpirun_version: + return mpi_app, "--hostfile", "-n", "--npernode" + else: + # Intel MPI and MVAPICH both use the same arg names + return mpi_app, "-f", "-n", "-ppn" + + +def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: + """ + Prepares and returns the command list and an environment with the correct simple launcher environment variables. 
+ """ + cmd = [] + if args.no_python and args.module: + raise ValueError("--module and --no_python cannot be used together") + + if args.mpirun_hostfile is not None: + mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg = _get_mpirun_args() + mpirun_ccl = getattr(args, "mpirun_ccl", None) + num_machines = args.num_machines + num_processes = getattr(args, "num_processes", None) + nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1" + cmd += [mpi_app_name, hostfile_arg, args.mpirun_hostfile, proc_per_node_arg, nproc_per_node] + if num_processes: + cmd += [num_proc_arg, str(num_processes)] + if not args.no_python: + cmd.append(sys.executable) + if args.module: + cmd.append("-m") + cmd.append(args.training_script) + cmd.extend(args.training_script_args) + + current_env = os.environ.copy() + current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu) + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + if args.gpu_ids != "all" and args.gpu_ids is not None: + if is_xpu_available(): + current_env["ZE_AFFINITY_MASK"] = args.gpu_ids + elif is_mlu_available(): + current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids + elif is_npu_available(): + current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids + else: + current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids + if args.num_machines > 1: + current_env["MASTER_ADDR"] = args.main_process_ip + current_env["MASTER_PORT"] = str(args.main_process_port) + + if args.mpirun_hostfile is not None: + current_env["CCL_WORKER_COUNT"] = mpirun_ccl + elif args.num_processes > 1: + current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1" + current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500" + + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError( + f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." + ) + current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value + current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode + current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) + current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) + + current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) + if is_ipex_available(): + current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower() + current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower() + if args.enable_cpu_affinity: + current_env["ACCELERATE_CPU_AFFINITY"] = "1" + return cmd, current_env + + +def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]: + """ + Prepares and returns an environment with the correct multi-GPU environment variables. 
+ """ + num_processes = args.num_processes + num_machines = args.num_machines + main_process_ip = args.main_process_ip + main_process_port = args.main_process_port + if num_machines > 1: + args.nproc_per_node = str(num_processes // num_machines) + args.nnodes = str(num_machines) + args.node_rank = int(args.machine_rank) + if getattr(args, "same_network", False): + args.master_addr = str(main_process_ip) + args.master_port = str(main_process_port) + else: + args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" + else: + args.nproc_per_node = str(num_processes) + if main_process_port is not None: + args.master_port = str(main_process_port) + + if main_process_port is None: + main_process_port = 29500 + + # only need to check port availability in main process, in case we have to start multiple launchers on the same machine + # for some reasons like splitting log files. + need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 + if need_port_check and is_port_in_use(main_process_port): + raise ConnectionError( + f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " + "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" + " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." + ) + + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + args.module = True + elif args.no_python: + args.no_python = True + + current_env = os.environ.copy() + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + gpu_ids = getattr(args, "gpu_ids", "all") + if gpu_ids != "all" and args.gpu_ids is not None: + if is_xpu_available(): + current_env["ZE_AFFINITY_MASK"] = gpu_ids + elif is_mlu_available(): + current_env["MLU_VISIBLE_DEVICES"] = gpu_ids + elif is_npu_available(): + current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids + else: + current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids + mixed_precision = args.mixed_precision.lower() + try: + mixed_precision = PrecisionType(mixed_precision) + except ValueError: + raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.") + + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError( + f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
+ ) + current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value + current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode + current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) + current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) + + if args.use_fsdp: + current_env["ACCELERATE_USE_FSDP"] = "true" + if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states: + raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`") + + current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy) + current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower() + current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params) + if args.fsdp_auto_wrap_policy is not None: + current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy) + if args.fsdp_transformer_layer_cls_to_wrap is not None: + current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap) + if args.fsdp_backward_prefetch_policy is not None: + warnings.warn( + "`fsdp_backward_prefetch_policy` is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use" + " `fsdp_backward_prefetch` instead", + FutureWarning, + ) + args.fsdp_backward_prefetch = args.fsdp_backward_prefetch_policy + if args.fsdp_backward_prefetch is not None: + current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch) + if args.fsdp_state_dict_type is not None: + current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type) + current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower() + current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower() + current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower() + current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower() + + if args.use_megatron_lm: + prefix = "MEGATRON_LM_" + current_env["ACCELERATE_USE_MEGATRON_LM"] = "true" + current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree) + current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree) + current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping) + if args.megatron_lm_num_micro_batches is not None: + current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches) + if args.megatron_lm_sequence_parallelism is not None: + current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism) + if args.megatron_lm_recompute_activations is not None: + current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations) + if args.megatron_lm_use_distributed_optimizer is not None: + current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer) + + current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) + if args.enable_cpu_affinity: + current_env["ACCELERATE_CPU_AFFINITY"] = "1" + return current_env + + +def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: + """ + Prepares and returns the command list and an environment with the correct DeepSpeed environment variables. 
+ """ + num_processes = args.num_processes + num_machines = args.num_machines + main_process_ip = args.main_process_ip + main_process_port = args.main_process_port + cmd = None + + # make sure launcher is not None + if args.deepspeed_multinode_launcher is None: + # set to default pdsh + args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0] + + if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + cmd = ["deepspeed", "--no_local_rank"] + cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)]) + if args.deepspeed_exclusion_filter is not None: + cmd.extend( + [ + "--exclude", + str(args.deepspeed_exclusion_filter), + ] + ) + elif args.deepspeed_inclusion_filter is not None: + cmd.extend( + [ + "--include", + str(args.deepspeed_inclusion_filter), + ] + ) + else: + cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)]) + if main_process_ip: + cmd.extend(["--master_addr", str(main_process_ip)]) + cmd.extend(["--master_port", str(main_process_port)]) + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + cmd.append("--module") + elif args.no_python: + cmd.append("--no_python") + cmd.append(args.training_script) + cmd.extend(args.training_script_args) + elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]: + args.nproc_per_node = str(num_processes // num_machines) + args.nnodes = str(num_machines) + args.node_rank = int(args.machine_rank) + if getattr(args, "same_network", False): + args.master_addr = str(main_process_ip) + args.master_port = str(main_process_port) + else: + args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" + else: + args.nproc_per_node = str(num_processes) + if main_process_port is not None: + args.master_port = str(main_process_port) + + if main_process_port is None: + main_process_port = 29500 + + # only need to check port availability in main process, in case we have to start multiple launchers on the same machine + # for some reasons like splitting log files. + need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 + if need_port_check and is_port_in_use(main_process_port): + raise ConnectionError( + f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " + "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" + " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." + ) + + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + args.module = True + elif args.no_python: + args.no_python = True + + current_env = os.environ.copy() + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + gpu_ids = getattr(args, "gpu_ids", "all") + if gpu_ids != "all" and args.gpu_ids is not None: + if is_xpu_available(): + current_env["ZE_AFFINITY_MASK"] = gpu_ids + elif is_mlu_available(): + current_env["MLU_VISIBLE_DEVICES"] = gpu_ids + elif is_npu_available(): + current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids + else: + current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. 
Choose between {PrecisionType.list()}." + ) + + current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath(".")) + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower() + current_env["ACCELERATE_USE_DEEPSPEED"] = "true" + if args.zero_stage is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage) + if args.gradient_accumulation_steps is not None: + current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps) + if args.gradient_clipping is not None: + current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower() + if args.offload_optimizer_device is not None: + current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower() + if args.offload_param_device is not None: + current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower() + if args.zero3_init_flag is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower() + if args.zero3_save_16bit_model is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower() + if args.deepspeed_config_file is not None: + current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file) + if args.enable_cpu_affinity: + current_env["ACCELERATE_CPU_AFFINITY"] = "1" + return cmd, current_env + + +def prepare_tpu( + args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False +) -> Tuple[argparse.Namespace, Dict[str, str]]: + """ + Prepares and returns an environment with the correct TPU environment variables. + """ + if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True): + if args.downcast_bf16: + current_env["XLA_DOWNCAST_BF16"] = "1" + else: + current_env["XLA_USE_BF16"] = "1" + if args.debug: + current_env["ACCELERATE_DEBUG_MODE"] = "true" + if pod: + # Take explicit args and set them up for XLA + args.vm = args.tpu_vm + args.tpu = args.tpu_name + return args, current_env + + +def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]: + if len(nargs) < 0: + return {} + # helper function to infer type for argsparser + + def _infer_type(s): + try: + s = float(s) + + if s // 1 == s: + return int(s) + return s + except ValueError: + return s + + parser = argparse.ArgumentParser() + _, unknown = parser.parse_known_args(nargs) + for index, argument in enumerate(unknown): + if argument.startswith(("-", "--")): + action = None + if index + 1 < len(unknown): # checks if next index would be in list + if unknown[index + 1].startswith(("-", "--")): # checks if next element is an key + # raise an error if element is store_true or store_false + raise ValueError( + "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types" + ) + else: # raise an error if last element is store_true or store_false + raise ValueError( + "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. 
Please define explicit types" + ) + # adds argument to parser based on action_store true + if action is None: + parser.add_argument(argument, type=_infer_type) + else: + parser.add_argument(argument, action=action) + + return { + key: (literal_eval(value) if value in ("True", "False") else value) + for key, value in parser.parse_args(nargs).__dict__.items() + } + + +def prepare_sagemager_args_inputs( + sagemaker_config: SageMakerConfig, args: argparse.Namespace +) -> Tuple[argparse.Namespace, Dict[str, Any]]: + # configure environment + print("Configuring Amazon SageMaker environment") + os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region + + # configure credentials + if sagemaker_config.profile is not None: + os.environ["AWS_PROFILE"] = sagemaker_config.profile + elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None: + os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id + os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key + else: + raise OSError("You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile") + + # extract needed arguments + source_dir = os.path.dirname(args.training_script) + if not source_dir: # checks if string is empty + source_dir = "." + entry_point = os.path.basename(args.training_script) + if not entry_point.endswith(".py"): + raise ValueError(f'Your training script should be a python script and not "{entry_point}"') + + print("Converting Arguments to Hyperparameters") + hyperparameters = _convert_nargs_to_dict(args.training_script_args) + + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError( + f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
+ ) + + # Environment variables to be set for use during training job + environment = { + "ACCELERATE_USE_SAGEMAKER": "true", + "ACCELERATE_MIXED_PRECISION": str(mixed_precision), + "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value, + "ACCELERATE_DYNAMO_MODE": args.dynamo_mode, + "ACCELERATE_DYNAMO_USE_FULLGRAPH": str(args.dynamo_use_fullgraph), + "ACCELERATE_DYNAMO_USE_DYNAMIC": str(args.dynamo_use_dynamic), + "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value, + } + # configure distribution set up + distribution = None + if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL: + distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} + + # configure sagemaker inputs + sagemaker_inputs = None + if sagemaker_config.sagemaker_inputs_file is not None: + print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file") + sagemaker_inputs = {} + with open(sagemaker_config.sagemaker_inputs_file) as file: + for i, line in enumerate(file): + if i == 0: + continue + l = line.split("\t") + sagemaker_inputs[l[0]] = l[1].strip() + print(f"Loaded SageMaker Inputs: {sagemaker_inputs}") + + # configure sagemaker metrics + sagemaker_metrics = None + if sagemaker_config.sagemaker_metrics_file is not None: + print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file") + sagemaker_metrics = [] + with open(sagemaker_config.sagemaker_metrics_file) as file: + for i, line in enumerate(file): + if i == 0: + continue + l = line.split("\t") + metric_dict = { + "Name": l[0], + "Regex": l[1].strip(), + } + sagemaker_metrics.append(metric_dict) + print(f"Loaded SageMaker Metrics: {sagemaker_metrics}") + + # configure session + print("Creating Estimator") + args = { + "image_uri": sagemaker_config.image_uri, + "entry_point": entry_point, + "source_dir": source_dir, + "role": sagemaker_config.iam_role_name, + "transformers_version": sagemaker_config.transformers_version, + "pytorch_version": sagemaker_config.pytorch_version, + "py_version": sagemaker_config.py_version, + "base_job_name": sagemaker_config.base_job_name, + "instance_count": sagemaker_config.num_machines, + "instance_type": sagemaker_config.ec2_instance_type, + "debugger_hook_config": False, + "distribution": distribution, + "hyperparameters": hyperparameters, + "environment": environment, + "metric_definitions": sagemaker_metrics, + } + + if sagemaker_config.additional_args is not None: + args = merge_dicts(sagemaker_config.additional_args, args) + return args, sagemaker_inputs + + +def env_var_path_add(env_var_name, path_to_add): + """ + Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the + caller to set it in os.environ. + """ + paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0] + paths.append(str(path_to_add)) + return ":".join(paths) + + +class PrepareForLaunch: + """ + Prepare a function that will launched in a distributed setup. + + Args: + launcher (`Callable`): + The function to launch. + distributed_type ([`~state.DistributedType`]): + The distributed type to prepare for. + debug (`bool`, *optional*, defaults to `False`): + Whether or not this is a debug launch. 
+ """ + + def __init__(self, launcher, distributed_type="NO", debug=False): + self.launcher = launcher + self.distributed_type = DistributedType(distributed_type) + self.debug = debug + + def __call__(self, index, *args): + if self.debug: + world_size = int(os.environ.get("WORLD_SIZE")) + rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE") + torch.distributed.init_process_group( + "gloo", + rank=index, + store=torch.distributed.FileStore(rdv_file, world_size), + world_size=world_size, + ) + elif self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_CPU, + ): + # Prepare the environment for torch.distributed + os.environ["LOCAL_RANK"] = str(index) + nproc = int(os.environ.get("NPROC", 1)) + node_rank = int(os.environ.get("NODE_RANK", 0)) + os.environ["RANK"] = str(nproc * node_rank + index) + + os.environ["FORK_LAUNCHED"] = str(1) + self.launcher(*args) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py b/venv/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1eb199307fd44cb815644e8bb34d08e2d6adb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py @@ -0,0 +1,1435 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import math +from abc import ABC +from functools import partial + +import torch +import torch.nn.functional as F +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler +from .imports import is_megatron_lm_available, is_transformers_available +from .operations import recursively_apply, send_to_device + + +if is_transformers_available(): + from transformers.modeling_outputs import ( + CausalLMOutputWithCrossAttentions, + Seq2SeqLMOutput, + SequenceClassifierOutput, + ) + + +if is_megatron_lm_available(): + from megatron import ( + get_args, + get_num_microbatches, + get_tensorboard_writer, + get_timers, + get_tokenizer, + mpu, + print_rank_0, + print_rank_last, + ) + from megatron.arguments import _add_data_args, _add_validation_args, parse_args, validate_args + from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint + from megatron.data.data_samplers import MegatronPretrainingRandomSampler, MegatronPretrainingSampler + from megatron.global_vars import set_global_variables + from megatron.initialize import ( + _compile_dependencies, + _init_autoresume, + _set_random_seed, + set_jit_fusion_options, + write_args_to_tensorboard, + ) + from megatron.model import BertModel, Float16Module, GPTModel, ModelType, T5Model + from megatron.model import DistributedDataParallel as LocalDDP + from megatron.model.classification import Classification + from megatron.optimizer import get_megatron_optimizer + from megatron.schedules import get_forward_backward_func + from megatron.text_generation.communication import broadcast_int_list, broadcast_tensor + from megatron.text_generation.generation import ( + beam_search_and_return_on_first_stage, + generate_tokens_probs_and_return_on_first_stage, + ) + from megatron.tokenizer.tokenizer import _vocab_size_with_padding + from megatron.training import get_model, get_optimizer_param_scheduler, training_log + from megatron.utils import ( + average_losses_across_data_parallel_group, + calc_params_l2_norm, + get_ltor_masks_and_position_ids, + unwrap_model, + ) + + +# model utilities +def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True): + """Build the model.""" + args = get_args() + mode = "pre-training" if args.pretraining_flag else "fine-tuning" + if args.rank == 0: + print(f"Building {args.model_type_name} model in the {mode} mode.") + print( + "The Megatron LM model weights are initialized at random in `accelerator.prepare`. " + "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup." 
+ ) + if args.model_type_name == "bert": + if args.pretraining_flag: + num_tokentypes = 2 if args.bert_binary_head else 0 + model = BertModel( + num_tokentypes=num_tokentypes, + add_binary_head=args.bert_binary_head, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + ) + else: + model = Classification( + num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process + ) + elif args.model_type_name == "gpt": + model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process) + elif args.model_type_name == "t5": + model = T5Model( + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + add_encoder=add_encoder, + add_decoder=add_decoder, + ) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + return model + + +def prepare_model(accelerator): + accelerator.print("Preparing model") + args = get_args() + if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None: + if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None: + raise ValueError( + "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`." + ) + custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function + model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func) + else: + if args.model_type_name in ("bert", "gpt"): + model_type = ModelType.encoder_or_decoder + elif args.model_type_name == "t5": + model_type = ModelType.encoder_and_decoder + if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1: + args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2 + model = get_model(model_provider_func, model_type) + return model + + +# dataloader utilities +class MegatronLMDummyDataLoader: + """ + Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training + + Args: + **dataset_kwargs: Megatron data arguments. 
+ """ + + def __init__(self, **dataset_kwargs): + parser = argparse.ArgumentParser() + parser = _add_data_args(parser) + parser = _add_validation_args(parser) + data_args = parser.parse_known_args() + self.dataset_args = vars(data_args[0]) + self.dataset_args.update(dataset_kwargs) + self.dataset_args["megatron_dataset_flag"] = True + + def set_megatron_data_args(self): + args = get_args() + for key, value in self.dataset_args.items(): + setattr(args, key, value) + + def get_train_valid_test_datasets_provider(self): + def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + dataset_args = { + "data_prefix": args.data_path, + "data_impl": args.data_impl, + "splits_string": args.split, + "train_valid_test_num_samples": train_val_test_num_samples, + "skip_warmup": (not args.mmap_warmup), + "seed": args.seed, + } + if args.model_type_name == "bert": + dataset_args.update( + { + "max_seq_length": args.seq_length, + "masked_lm_prob": args.mask_prob, + "short_seq_prob": args.short_seq_prob, + "binary_head": args.bert_binary_head, + } + ) + elif args.model_type_name == "gpt": + dataset_args.update( + { + "seq_length": args.seq_length, + } + ) + elif args.model_type_name == "t5": + dataset_args.update( + { + "max_seq_length": args.encoder_seq_length, + "max_seq_length_dec": args.decoder_seq_length, + "masked_lm_prob": args.mask_prob, + "short_seq_prob": args.short_seq_prob, + "dataset_type": "t5", + } + ) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + if args.model_type_name == "gpt": + from megatron.data.gpt_dataset import build_train_valid_test_datasets + else: + from megatron.data.dataset_utils import build_train_valid_test_datasets + train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args) + return train_ds, valid_ds, test_ds + + return train_valid_test_datasets_provider + + def build_pretraining_data_loader(self, dataset, consumed_samples): + if dataset is None: + return None + args = get_args() + micro_batch_size = args.micro_batch_size * args.num_micro_batches + + # Megatron sampler + if args.dataloader_type == "single": + batch_sampler = MegatronPretrainingSampler( + total_samples=len(dataset), + consumed_samples=consumed_samples, + micro_batch_size=micro_batch_size, + data_parallel_rank=mpu.get_data_parallel_rank(), + data_parallel_size=mpu.get_data_parallel_world_size(), + ) + elif args.dataloader_type == "cyclic": + batch_sampler = MegatronPretrainingRandomSampler( + dataset, + total_samples=len(dataset), + consumed_samples=consumed_samples, + micro_batch_size=micro_batch_size, + data_parallel_rank=mpu.get_data_parallel_rank(), + data_parallel_size=mpu.get_data_parallel_world_size(), + data_sharding=args.data_sharding, + ) + else: + raise Exception(f"{args.dataloader_type} dataloader type is not supported.") + + # Torch dataloader. + return torch.utils.data.DataLoader( + dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True + ) + + def build_train_valid_test_data_iterators(self): + def cyclic_iter(iter): + while True: + yield from iter + + args = get_args() + + (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None) + + print_rank_0("> building train, validation, and test datasets ...") + + # Backward compatibility, assume fixed batch size. 
+ if args.iteration > 0 and args.consumed_train_samples == 0: + assert args.train_samples is None, "only backward compatiblity support for iteration-based training" + args.consumed_train_samples = args.iteration * args.global_batch_size + if args.iteration > 0 and args.consumed_valid_samples == 0: + if args.train_samples is None: + args.consumed_valid_samples = ( + (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size + ) + + # Data loader only on rank 0 of each model parallel group. + if mpu.get_tensor_model_parallel_rank() == 0: + # Number of train/valid/test samples. + if args.train_samples: + train_samples = args.train_samples + else: + train_samples = args.train_iters * args.global_batch_size + eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters + test_iters = args.eval_iters + train_val_test_num_samples = [ + train_samples, + eval_iters * args.global_batch_size, + test_iters * args.global_batch_size, + ] + print_rank_0(" > datasets target sizes (minimum size):") + print_rank_0(f" train: {train_val_test_num_samples[0]}") + print_rank_0(f" validation: {train_val_test_num_samples[1]}") + print_rank_0(f" test: {train_val_test_num_samples[2]}") + + # Build the datasets. + train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider() + train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples) + + # Build dataloders. + train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples) + valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples) + test_dataloader = self.build_pretraining_data_loader(test_ds, 0) + + # Flags to know if we need to do training/validation/testing. + do_train = train_dataloader is not None and args.train_iters > 0 + do_valid = valid_dataloader is not None and args.eval_iters > 0 + do_test = test_dataloader is not None and args.eval_iters > 0 + # Need to broadcast num_tokens and num_type_tokens. + flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)]) + else: + flags = torch.cuda.LongTensor([0, 0, 0]) + + # Broadcast num tokens. + torch.distributed.broadcast( + flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group() + ) + args.do_train = flags[0].item() + args.do_valid = flags[1].item() + args.do_test = flags[2].item() + + # Build iterators. 
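+# [Editor's note -- illustrative addition, not part of the upstream diff] Worked example
+# of the target-size arithmetic above, with assumed values train_iters=1000,
+# global_batch_size=16, eval_interval=100, eval_iters=10:
+#     train:      1000 * 16                    = 16000 samples
+#     validation: (1000 // 100 + 1) * 10 * 16  = 1760 samples
+#     test:       10 * 16                      = 160 samples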
+ dl_type = args.dataloader_type + assert dl_type in ["single", "cyclic"] + + if train_dataloader is not None: + train_data_iterator = ( + iter(train_dataloader) if dl_type == "single" else iter(cyclic_iter(train_dataloader)) + ) + else: + train_data_iterator = None + + if valid_dataloader is not None: + valid_data_iterator = ( + iter(valid_dataloader) if dl_type == "single" else iter(cyclic_iter(valid_dataloader)) + ) + else: + valid_data_iterator = None + + if test_dataloader is not None: + test_data_iterator = iter(test_dataloader) if dl_type == "single" else iter(cyclic_iter(test_dataloader)) + else: + test_data_iterator = None + + return train_data_iterator, valid_data_iterator, test_data_iterator + + +def prepare_data_loader(accelerator, dataloader): + accelerator.print("Preparing dataloader") + args = get_args() + if not args.megatron_dataset_flag: + from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader + + args = get_args() + micro_batch_size = args.micro_batch_size * args.num_micro_batches + kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS} + if kwargs["batch_size"] is None: + if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler): + kwargs["sampler"].batch_size = micro_batch_size + else: + del kwargs["sampler"] + del kwargs["shuffle"] + del kwargs["batch_size"] + kwargs["batch_sampler"].batch_size = micro_batch_size + else: + del kwargs["batch_sampler"] + kwargs["batch_size"] = micro_batch_size + + dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs) + return prepare_data_loader( + dataloader, + accelerator.device, + num_processes=mpu.get_data_parallel_world_size(), + process_index=mpu.get_data_parallel_rank(), + split_batches=accelerator.split_batches, + put_on_device=True, + rng_types=accelerator.rng_types.copy(), + dispatch_batches=accelerator.dispatch_batches, + ) + else: + if args.consumed_samples is not None: + ( + args.consumed_train_samples, + args.consumed_valid_samples, + args.consumed_test_samples, + ) = args.consumed_samples + else: + args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0 + ( + train_data_iterator, + valid_data_iterator, + test_data_iterator, + ) = dataloader.build_train_valid_test_data_iterators() + return train_data_iterator, valid_data_iterator, test_data_iterator + + +# optimizer utilities +class MegatronLMOptimizerWrapper(AcceleratedOptimizer): + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + + def zero_grad(self, set_to_none=None): + pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed + + def step(self): + pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + return self.optimizer.skipped_iter + + +def prepare_optimizer(accelerator, model): + accelerator.print("Preparing optimizer") + args = get_args() + optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult) + return optimizer + + +# scheduler utilities +class MegatronLMDummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. 
+ + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int): + Total number of steps. + warmup_num_steps (int): + Number of steps for warmup. + **kwargs (additional keyword arguments, *optional*): + Other arguments. + """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.kwargs = kwargs + + +class MegatronLMSchedulerWrapper(AcceleratedScheduler): + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self, *args, **kwargs): + return # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed + + +def prepare_scheduler(accelerator, optimizer, scheduler): + accelerator.print("Preparing scheduler") + scheduler = get_optimizer_param_scheduler(optimizer) + return scheduler + + +class AbstractTrainStep(ABC): + """Abstract class for batching, forward pass and loss handler.""" + + def __init__(self, name): + super().__init__() + self.name = name + + def get_batch_func(self): + pass + + def get_forward_step_func(self): + pass + + def get_loss_func(self): + pass + + +class BertTrainStep(AbstractTrainStep): + """ + Bert train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. + """ + + def __init__(self, args): + super().__init__("BertTrainStep") + self.get_batch = self.get_batch_func(args.megatron_dataset_flag) + self.loss_func = self.get_loss_func(args.pretraining_flag, args.num_labels) + self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head) + if not args.model_return_dict: + self.model_output_class = None + else: + self.model_output_class = SequenceClassifierOutput + + def get_batch_func(self, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Build the batch.""" + + # Items and their type. + keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + tokens = data_b["text"].long() + types = data_b["types"].long() + sentence_order = data_b["is_random"].long() + loss_mask = data_b["loss_mask"].float() + lm_labels = data_b["labels"].long() + padding_mask = data_b["padding_mask"].long() + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + def get_batch_transformer(data_iterator): + """Build the batch.""" + data = next(data_iterator) + data = send_to_device(data, torch.cuda.current_device()) + + # Unpack. 
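+# [Editor's note -- illustrative addition, not part of the upstream diff] Commented-out
+# sketch of how the `MegatronLMDummyScheduler` defined above is typically constructed in
+# a training script; the optimizer and step counts are hypothetical placeholders:
+#
+#     lr_scheduler = MegatronLMDummyScheduler(
+#         optimizer=optimizer,
+#         total_num_steps=max_train_steps,
+#         warmup_num_steps=num_warmup_steps,
+#     )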
+ tokens = data["input_ids"].long() + padding_mask = data["attention_mask"].long() + if "token_type_ids" in data: + types = data["token_type_ids"].long() + else: + types = None + if "labels" in data: + lm_labels = data["labels"].long() + loss_mask = (data["labels"] != -100).to(torch.float) + else: + lm_labels = None + loss_mask = None + if "next_sentence_label" in data: + sentence_order = data["next_sentence_label"].long() + else: + sentence_order = None + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + if megatron_dataset_flag: + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self, pretraining_flag, num_labels): + def loss_func_pretrain(loss_mask, sentence_order, output_tensor): + lm_loss_, sop_logits = output_tensor + + lm_loss_ = lm_loss_.float() + loss_mask = loss_mask.float() + lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + if sop_logits is not None: + sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1) + sop_loss = sop_loss.float() + loss = lm_loss + sop_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss]) + return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]} + + else: + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss]) + return loss, {"lm loss": averaged_losses[0]} + + def loss_func_finetune(labels, logits): + if num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)): + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, num_labels), labels.view(-1)) + else: + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + averaged_losses = average_losses_across_data_parallel_group([loss]) + return loss, {"loss": averaged_losses[0]} + + if pretraining_flag: + return loss_func_pretrain + else: + return loss_func_finetune + + def get_forward_step_func(self, pretraining_flag, bert_binary_head): + def forward_step(data_iterator, model): + """Forward step.""" + tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator) + if not bert_binary_head: + types = None + # Forward pass through the model. + if pretraining_flag: + output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels) + return output_tensor, partial(self.loss_func, loss_mask, sentence_order) + else: + logits = model(tokens, padding_mask, tokentype_ids=types) + return logits, partial(self.loss_func, labels) + + return forward_step + + +class GPTTrainStep(AbstractTrainStep): + """ + GPT train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. 
+ """ + + def __init__(self, args): + super().__init__("GPTTrainStep") + self.get_batch = self.get_batch_func(args.megatron_dataset_flag) + self.loss_func = self.get_loss_func() + self.forward_step = self.get_forward_step_func() + self.eod_token = args.padded_vocab_size - 1 + if args.vocab_file is not None: + tokenizer = get_tokenizer() + self.eod_token = tokenizer.eod + self.reset_position_ids = args.reset_position_ids + self.reset_attention_mask = args.reset_attention_mask + self.eod_mask_loss = args.eod_mask_loss + if not args.model_return_dict: + self.model_output_class = None + else: + self.model_output_class = CausalLMOutputWithCrossAttentions + + def get_batch_func(self, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Generate a batch""" + # Items and their type. + keys = ["text"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + tokens_ = data_b["text"].long() + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + # Get the masks and postition ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss + ) + + return tokens, labels, loss_mask, attention_mask, position_ids + + def get_batch_transformer(data_iterator): + data = next(data_iterator) + data = {"input_ids": data["input_ids"]} + data = send_to_device(data, torch.cuda.current_device()) + + tokens_ = data["input_ids"].long() + padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token + tokens_ = torch.concat([tokens_, padding], dim=1) + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + # Get the masks and postition ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True + ) + return tokens, labels, loss_mask, attention_mask, position_ids + + if megatron_dataset_flag: + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self): + args = get_args() + + def loss_func(loss_mask, output_tensor): + if args.return_logits: + losses, logits = output_tensor + else: + losses = output_tensor + losses = losses.float() + loss_mask = loss_mask.view(-1).float() + loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() + + # Reduce loss for logging. + averaged_loss = average_losses_across_data_parallel_group([loss]) + + output_dict = {"lm loss": averaged_loss[0]} + if args.return_logits: + output_dict.update({"logits": logits}) + return loss, output_dict + + return loss_func + + def get_forward_step_func(self): + def forward_step(data_iterator, model): + """Forward step.""" + # Get the batch. + tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) + output_tensor = model(tokens, position_ids, attention_mask, labels=labels) + + return output_tensor, partial(self.loss_func, loss_mask) + + return forward_step + + +class T5TrainStep(AbstractTrainStep): + """ + T5 train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. 
+ """ + + def __init__(self, args): + super().__init__("T5TrainStep") + self.get_batch = self.get_batch_func(args.megatron_dataset_flag) + self.loss_func = self.get_loss_func() + self.forward_step = self.get_forward_step_func() + if not args.model_return_dict: + self.model_output_class = None + else: + self.model_output_class = Seq2SeqLMOutput + + @staticmethod + def attn_mask_postprocess(attention_mask): + # We create a 3D attention mask from a 2D tensor mask. + # [b, 1, s] + attention_mask_b1s = attention_mask.unsqueeze(1) + # [b, s, 1] + attention_mask_bs1 = attention_mask.unsqueeze(2) + # [b, s, s] + attention_mask_bss = attention_mask_b1s * attention_mask_bs1 + # Convert attention mask to binary: + extended_attention_mask = attention_mask_bss < 0.5 + return extended_attention_mask + + @staticmethod + def get_decoder_mask(seq_length, device): + attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device)) + attention_mask = attention_mask < 0.5 + return attention_mask + + @staticmethod + def get_enc_dec_mask(attention_mask, dec_seq_length, device): + batch_size, _ = attention_mask.shape + # We create a 3D attention mask from a 2D tensor mask. + # [b, 1, s] + attention_mask_b1s = attention_mask.unsqueeze(1) + # [b, s, 1] + attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device) + attention_mask_bss = attention_mask_bs1 * attention_mask_b1s + extended_attention_mask = attention_mask_bss < 0.5 + return extended_attention_mask + + def get_batch_func(self, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Build the batch.""" + + keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. 
+ tokens_enc = data_b["text_enc"].long() + tokens_dec = data_b["text_dec"].long() + labels = data_b["labels"].long() + loss_mask = data_b["loss_mask"].float() + + enc_mask = data_b["enc_mask"] < 0.5 + dec_mask = data_b["dec_mask"] < 0.5 + enc_dec_mask = data_b["enc_dec_mask"] < 0.5 + + return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask + + def get_batch_transformer(data_iterator): + """Build the batch.""" + data = next(data_iterator) + data = send_to_device(data, torch.cuda.current_device()) + + tokens_enc = data["input_ids"].long() + labels = data["labels"].long() + loss_mask = (labels != -100).to(torch.float) + if "decoder_input_ids" in data: + tokens_dec = data["decoder_input_ids"].long() + else: + tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long) + tokens_dec[..., 1:] = labels[..., :-1].clone() + tokens_dec[..., 0] = 0 + tokens_dec.masked_fill_(tokens_dec == -100, 0) + enc_mask = T5TrainStep.attn_mask_postprocess(data["attention_mask"].long()) + dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device) + enc_dec_mask = T5TrainStep.get_enc_dec_mask( + data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device + ) + + return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask + + if megatron_dataset_flag: + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self): + def loss_func(loss_mask, output_tensor): + lm_loss_ = output_tensor.float() + lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss]) + + return loss, {"lm loss": averaged_losses[0]} + + return loss_func + + def get_forward_step_func(self): + def forward_step(data_iterator, model): + """Forward step.""" + # Get the batch. + tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch( + data_iterator + ) + # Forward model lm_labels + output_tensor = model( + tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels + ) + + return output_tensor, partial(self.loss_func, loss_mask) + + return forward_step + + +# intialize megatron setup +def initialize(accelerator, extra_args_provider=None, args_defaults={}): + accelerator.print("Initializing Megatron-LM") + assert torch.cuda.is_available(), "Megatron requires CUDA." + + # Parse arguments + args = parse_args(extra_args_provider, ignore_unknown_args=True) + + # Set defaults + for key, value in args_defaults.items(): + if getattr(args, key, None) is not None: + if args.rank == 0: + print( + f"WARNING: overriding default arguments for " f"{key}:{getattr(args, key)} with {key}:{value}", + flush=True, + ) + setattr(args, key, value) + + if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False): + assert args.load is not None, "--use-checkpoints-args requires --load argument" + load_args_from_checkpoint(args) + + validate_args(args) + + # set global args, build tokenizer, and set adlr-autoresume, + # tensorboard-writer, and timers. + set_global_variables(args) + + # torch.distributed initialization + def finish_mpu_init(): + args = get_args() + # Pytorch distributed. 
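+# [Editor's note -- illustrative addition, not part of the upstream diff] The
+# `args_defaults` consumed by `initialize` above is normally assembled from the
+# Megatron-LM plugin configuration; a hypothetical, minimal example of its shape:
+#
+#     args_defaults = {
+#         "micro_batch_size": 4,
+#         "tensor_model_parallel_size": 2,
+#         "pipeline_model_parallel_size": 1,
+#         "seq_length": 1024,
+#     }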
+ device_count = torch.cuda.device_count() + args.rank = torch.distributed.get_rank() + args.world_size = torch.distributed.get_world_size() + if device_count > 0: + device = args.rank % device_count + if args.local_rank is not None: + assert args.local_rank == device, "expected local-rank to be the same as rank % device-count." + else: + args.local_rank = device + + # Set the tensor model-parallel, pipeline model-parallel, and + # data-parallel communicators. + if mpu.model_parallel_is_initialized(): + print("model parallel is already initialized") + else: + mpu.initialize_model_parallel( + args.tensor_model_parallel_size, + args.pipeline_model_parallel_size, + args.virtual_pipeline_model_parallel_size, + args.pipeline_model_parallel_split_rank, + ) + + # Random seeds for reproducibility. + if args.rank == 0: + print(f"> setting random seeds to {args.seed} ...") + _set_random_seed(args.seed, args.data_parallel_random_init) + + args = get_args() + + # Megatron's MPU is the master. Complete initialization right away. + finish_mpu_init() + + # Autoresume. + _init_autoresume() + + # Compile dependencies. + _compile_dependencies() + + # Set pytorch JIT layer fusion options and warmup JIT functions. + set_jit_fusion_options() + args = get_args() + args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args) + if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2: + args.bert_binary_head = True + else: + args.bert_binary_head = False + args.iteration = 0 + + +class MegatronEngine(torch.nn.Module): + """ + Megatron-LM model wrapper + + Args: + accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use. + model: Megatron-LM model + optimizer: Megatron-LM optimizer + lr_scheduler: Megatron-LM lr scheduler + """ + + def __init__(self, accelerator, model, optimizer, scheduler): + super().__init__() + self.module = model + self.base_model = model[0] + self.optimizer = optimizer + self.scheduler = scheduler + args = get_args() + if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None: + self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class( + args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs + ) + elif args.model_type_name == "bert": + self.train_step_handler = BertTrainStep(args) + elif args.model_type_name == "gpt": + self.train_step_handler = GPTTrainStep(args) + elif args.model_type_name == "t5": + self.train_step_handler = T5TrainStep(args) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + self.optimizer.skipped_iter = False + + # Tracking loss. + self.total_loss_dict = {} + self.eval_total_loss_dict = {} + self.iteration = 0 + self.report_memory_flag = True + if args.tensorboard_dir is not None: + write_args_to_tensorboard() + + def train(self): + for model_module in self.module: + model_module.train() + self.log_eval_results() + + def eval(self): + for model_module in self.module: + model_module.eval() + + def train_step(self, **batch_data): + """ + Training step for Megatron-LM + + Args: + batch_data (:obj:`dict`): The batch data to train on. 
+ """ + + args = get_args() + timers = get_timers() + + if len(batch_data) > 0: + data_chunks = [] + if args.num_micro_batches > 1: + for i in range(0, args.num_micro_batches): + data_chunks.append( + { + k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] + for k, v in batch_data.items() + } + ) + else: + data_chunks = [batch_data] + + if len(self.module) > 1: + batch_data_iterator = ( + [iter(data_chunks) for _ in range(len(self.module))] + if len(batch_data) > 0 + else [None] * len(self.module) + ) + else: + batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None + + # Set grad to zero. + if args.DDP_impl == "local" and args.use_contiguous_buffers_in_local_ddp: + for partition in self.module: + partition.zero_grad_buffer() + self.optimizer.zero_grad() + + # Forward pass. + forward_backward_func = get_forward_backward_func() + losses_reduced = forward_backward_func( + self.train_step_handler.forward_step, + batch_data_iterator, + self.module, + self.optimizer, + None, + forward_only=False, + ) + + # Empty unused memory. + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + # Reduce gradients. + timers("backward-reduce-model-grads").start() + self.optimizer.reduce_model_grads(args, timers) + timers("backward-reduce-model-grads").stop() + + # Update parameters. + timers("optimizer").start() + update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers) + timers("optimizer").stop() + + # Gather params. + if update_successful: + timers("backward-gather-model-params").start() + self.optimizer.gather_model_params(args, timers) + timers("backward-gather-model-params").stop() + + # Update learning rate. + if update_successful: + if self.scheduler is not None: + increment = get_num_microbatches() * args.micro_batch_size * args.data_parallel_size + self.scheduler.step(increment=increment) + skipped_iter = 0 + else: + skipped_iter = 1 + + self.optimizer.skipped_iter = not update_successful + + # Empty unused memory. + if args.empty_unused_memory_level >= 2: + torch.cuda.empty_cache() + + args.consumed_train_samples += ( + mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() + ) + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Average loss across microbatches. + loss_reduced = {} + for key in losses_reduced[0]: + losses_reduced_for_key = [x[key] for x in losses_reduced] + if len(losses_reduced_for_key[0].shape) == 0: + loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) + else: + loss_reduced[key] = torch.concat(losses_reduced_for_key) + return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad + return {}, skipped_iter, grad_norm, num_zeros_in_grad + + def eval_step(self, **batch_data): + """ + Evaluation step for Megatron-LM + + Args: + batch_data (:obj:`dict`): The batch data to evaluate on. 
+ """ + + args = get_args() + data_chunks = [] + if args.num_micro_batches > 1: + for i in range(0, args.num_micro_batches): + data_chunks.append( + {k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items()} + ) + else: + data_chunks = [batch_data] + + if len(self.module) > 1: + batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))] + else: + batch_data_iterator = iter(data_chunks) + forward_backward_func = get_forward_backward_func() + loss_dicts = forward_backward_func( + self.train_step_handler.forward_step, + batch_data_iterator, + self.module, + optimizer=None, + timers=None, + forward_only=True, + ) + # Empty unused memory + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + args.consumed_valid_samples += ( + mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() + ) + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Average loss across microbatches. + loss_reduced = {} + for key in loss_dicts[0]: + losses_reduced_for_key = [x[key] for x in loss_dicts] + if len(losses_reduced_for_key[0].shape) == 0: + loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) + else: + loss_reduced[key] = torch.concat(losses_reduced_for_key) + return loss_reduced + else: + return {} + + def forward(self, **batch_data): + # During training, we use train_step() + # model(**batch_data) performs following operations by delegating it to `self.train_step`: + # 1. Prepare **batch_data for Tendor, Pipeline and Model Parallelism + # 2. Set grad to zero. + # 3. forward pass and backward pass using Pipeline Parallelism + # 4. Empty unused memory. + # 5. Reduce gradients. + # 6. Update parameters. + # 7. Gather params when using Distributed Optimizer (Data Parallelism). + # 8. Update learning rate if scheduler is specified. + # 9. Empty unused memory. + # 10. Average loss across microbatches and across DP ranks. + # + # During evaluation, we use eval_step() + args = get_args() + if self.module[0].training: + loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data) + self.iteration += 1 + if args.tensorboard_dir is not None: + # Logging. 
+ loss_scale = self.optimizer.get_loss_scale().item() + params_norm = None + if args.log_params_norm: + params_norm = calc_params_l2_norm(self.model) + self.report_memory_flag = training_log( + loss_dict, + self.total_loss_dict, + self.optimizer.param_groups[0]["lr"], + self.iteration, + loss_scale, + self.report_memory_flag, + skipped_iter, + grad_norm, + params_norm, + num_zeros_in_grad, + ) + else: + loss_dict = self.eval_step(**batch_data) + if args.tensorboard_dir is not None: + for key in loss_dict: + self.eval_total_loss_dict[key] = ( + self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] + ) + self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get( + key + "_num_iters", torch.cuda.FloatTensor([0.0]) + ) + torch.cuda.FloatTensor([1.0]) + + loss = torch.tensor(0.0, device=args.local_rank) + for key in loss_dict: + if len(loss_dict[key].shape) == 0: + loss += loss_dict[key] + + logits = None + if "logits" in loss_dict: + logits = loss_dict["logits"] + # loss = reduce(loss) + if self.train_step_handler.model_output_class is not None: + return self.train_step_handler.model_output_class(loss=loss, logits=logits) + return loss + + def log_eval_results(self): + args = get_args() + if args.tensorboard_dir is None or self.iteration == 0: + return + args = get_args() + writer = get_tensorboard_writer() + string = f"validation loss at iteration {self.iteration} | " + for key in self.eval_total_loss_dict: + if key.endswith("_num_iters"): + continue + value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + "_num_iters"] + string += f"{key} value: {value} | " + ppl = math.exp(min(20, value.item())) + if args.pretraining_flag: + string += f"{key} PPL: {ppl} | " + if writer: + writer.add_scalar(f"{key} validation", value.item(), self.iteration) + if args.pretraining_flag: + writer.add_scalar(f"{key} validation ppl", ppl, self.iteration) + + length = len(string) + 1 + print_rank_last("-" * length) + print_rank_last(string) + print_rank_last("-" * length) + self.eval_total_loss_dict = {} + + def save_checkpoint(self, output_dir): + self.log_eval_results() + args = get_args() + args.save = output_dir + torch.distributed.barrier() + save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler) + torch.distributed.barrier() + + def load_checkpoint(self, input_dir): + args = get_args() + args.load = input_dir + args.consumed_train_samples = 0 + args.consumed_valid_samples = 0 + torch.distributed.barrier() + iteration = load_checkpoint(self.module, self.optimizer, self.scheduler) + torch.distributed.barrier() + self.iteration = iteration + if args.fp16 and self.iteration == 0: + self.optimizer.reload_model_params() + + def megatron_generate( + self, + inputs, + attention_mask=None, + max_length=None, + max_new_tokens=None, + num_beams=None, + temperature=None, + top_k=None, + top_p=None, + length_penalty=None, + **kwargs, + ): + """ + Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along + with sampling. Refer the Megatron-LM repo for more details + + Args: + inputs (torch.Tensor): input ids + attention_mask (torch.Tensor, optional): attention mask. Defaults to None. + max_length (int, optional): max length of the generated sequence. Defaults to None. + Either this or max_new_tokens should be provided. + max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None. + Either this or max_length should be provided. 
+ num_beams (int, optional): number of beams to use for beam search. Defaults to None. + temperature (float, optional): temperature for sampling. Defaults to 1.0. + top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0. + top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0. + length_penalty (float, optional): length penalty for beam search. Defaults to None. + kwargs: additional key-value arguments + """ + + # checking if required arguments are passed + args = get_args() + if args.model_type_name != "gpt": + raise NotImplementedError("Generate method is not implemented for this model") + + if args.data_parallel_size > 1: + raise ValueError("Generate method requires data parallelism to be 1") + + if args.sequence_parallel: + raise ValueError("Generate method requires sequence parallelism to be False") + + if args.recompute_granularity is not None: + raise ValueError("Checkpoint activations cannot be set for inference") + + if args.vocab_file is None: + raise ValueError("Vocab file is required for inference") + + # Prepare inputs + if max_length is None and max_new_tokens is None: + raise ValueError("`max_length` or `max_new_tokens` are required for inference") + + if temperature is None: + temperature = 1.0 + elif not (0.0 < temperature <= 100.0): + raise ValueError("temperature must be a positive number less than or equal to 100.0") + + if top_k is None: + top_k = 0 + elif not (0 <= top_k <= 1000): + raise ValueError("top_k must be a positive number less than or equal to 1000") + + if top_p is None: + top_p = 0.0 + elif top_p > 0.0 and top_k > 0.0: + raise ValueError("top_p and top_k sampling cannot be set together") + else: + if not (0.0 <= top_p <= 1.0): + raise ValueError("top_p must be less than or equal to 1.0") + + top_p_decay = kwargs.get("top_p_decay", 0.0) + if not (0.0 <= top_p_decay <= 1.0): + raise ValueError("top_p_decay must be less than or equal to 1.0") + + top_p_bound = kwargs.get("top_p_bound", 0.0) + if not (0.0 <= top_p_bound <= 1.0): + raise ValueError("top_p_bound must be less than or equal to 1.0") + + add_BOS = kwargs.get("add_BOS", False) + if not (isinstance(add_BOS, bool)): + raise ValueError("add_BOS must be a boolean") + + beam_width = num_beams + if beam_width is not None: + if not isinstance(beam_width, int): + raise ValueError("beam_width must be an integer") + if beam_width < 1: + raise ValueError("beam_width must be greater than 0") + if inputs.shape[0] > 1: + return "When doing beam_search, batch size must be 1" + + tokenizer = get_tokenizer() + + stop_token = kwargs.get("stop_token", tokenizer.eod) + if stop_token is not None: + if not isinstance(stop_token, int): + raise ValueError("stop_token must be an integer") + + if length_penalty is None: + length_penalty = 1.0 + + sizes_list = None + prompts_tokens_tensor = None + prompts_length_tensor = None + if torch.distributed.get_rank() == 0: + # Get the prompts length. 
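+# [Editor's note -- illustrative addition, not part of the upstream diff] Commented-out
+# usage sketch for the validation logic above; tensors and sampling values are
+# assumptions, and note top_k and top_p cannot both be set:
+#
+#     generated = model.megatron_generate(
+#         inputs=batch["input_ids"],
+#         attention_mask=batch["attention_mask"],
+#         max_new_tokens=64,
+#         top_k=50,
+#         temperature=0.8,
+#     )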
+ if attention_mask is None: + prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0]) + else: + prompts_length_tensor = attention_mask.sum(axis=-1).cuda() + + if max_new_tokens is None: + max_new_tokens = max_length - inputs.shape[1] + if max_new_tokens <= 0: + raise ValueError("max_new_tokens must be greater than 0") + + if add_BOS: + max_length = max_new_tokens + inputs.shape[1] + 1 + # making sure that `max_length` is a multiple of 4 to leverage fused kernels + max_length = 4 * math.ceil(max_length / 4) + max_new_tokens = max_length - (inputs.shape[1] + 1) + padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) + prompts_tokens_tensor = torch.concat( + [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1 + ) + else: + # making sure that `max_length` is a multiple of 4 to leverage fused kernels + max_length = max_new_tokens + inputs.shape[1] + max_length = 4 * math.ceil(max_length / 4) + max_new_tokens = max_length - inputs.shape[1] + padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) + prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1) + + # We need the sizes of these tensors for the boradcast + sizes_list = [ + prompts_tokens_tensor.size(0), # Batch size + prompts_tokens_tensor.size(1), + ] # Sequence lenght + + # First, broadcast the sizes. + sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0) + + # Now that we have the sizes, we can boradcast the tokens + # and length tensors. + sizes = sizes_tensor.tolist() + context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0) + context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0) + + # Run the inference + random_seed = kwargs.get("random_seed", 0) + torch.random.manual_seed(random_seed) + unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module)) + if beam_width is not None: + tokens, _ = beam_search_and_return_on_first_stage( + unwrapped_model, + context_tokens_tensor, + context_length_tensor, + beam_width, + stop_token=stop_token, + num_return_gen=1, + length_penalty=length_penalty, + ) + else: + tokens, _, _ = generate_tokens_probs_and_return_on_first_stage( + unwrapped_model, + context_tokens_tensor, + context_length_tensor, + return_output_log_probs=False, + top_k=top_k, + top_p=top_p, + top_p_decay=top_p_decay, + top_p_bound=top_p_bound, + temperature=temperature, + use_eod_token_for_early_termination=True, + ) + return tokens + + +# other utilities +def avg_losses_across_data_parallel_group(losses): + """ + Average losses across data parallel group. + + Args: + losses (List[Tensor]): List of losses to average across data parallel group. + """ + + return average_losses_across_data_parallel_group(losses) + + +def gather_across_data_parallel_groups(tensor): + """ + Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. + + Args: + tensor (nested list/tuple/dictionary of `torch.Tensor`): + The data to gather across data parallel ranks. 
+ + """ + + def _gpu_gather_one(tensor): + if tensor.ndim == 0: + tensor = tensor.clone()[None] + output_tensors = [ + torch.empty_like(tensor) + for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group())) + ] + torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group()) + return torch.cat(output_tensors, dim=0) + + return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True) diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py b/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..d1d7f2bdf9984fd4fcfc098eddf1efe265d05464 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py @@ -0,0 +1,1800 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import gc +import importlib +import inspect +import json +import logging +import os +import re +import shutil +import tempfile +import warnings +from collections import OrderedDict, defaultdict +from typing import Dict, List, Optional, Tuple, Union + +import packaging +import torch +import torch.nn as nn + +from ..state import AcceleratorState +from .constants import SAFE_WEIGHTS_NAME, WEIGHTS_NAME +from .dataclasses import AutocastKwargs, CustomDtype, DistributedType +from .imports import ( + is_mlu_available, + is_mps_available, + is_npu_available, + is_peft_available, + is_torch_xla_available, + is_xpu_available, +) +from .offload import load_offloaded_weight, offload_weight, save_offload_index +from .tqdm import is_tqdm_available, tqdm +from .versions import compare_versions + + +if is_npu_available(check_device=False): + import torch_npu # noqa: F401 + +if is_mlu_available(check_device=False): + import torch_mlu # noqa: F401 + +from safetensors import safe_open +from safetensors.torch import load_file as safe_load_file + + +WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" + +logger = logging.getLogger(__name__) + + +def is_peft_model(model): + from .other import extract_model_from_parallel + + if is_peft_available(): + from peft import PeftModel + + return is_peft_available() and isinstance(extract_model_from_parallel(model), PeftModel) + + +def check_device_same(first_device, second_device): + """ + Utility method to check if two `torch` devices are similar. 
When dealing with CUDA devices, torch throws `False` + for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same + + Args: + first_device (`torch.device`): + First device to check + second_device (`torch.device`): + Second device to check + """ + if first_device.type != second_device.type: + return False + + if first_device.type == "cuda" and first_device.index is None: + # In case the first_device is a cuda device and have + # the index attribute set to `None`, default it to `0` + first_device = torch.device("cuda", index=0) + + if second_device.type == "cuda" and second_device.index is None: + # In case the second_device is a cuda device and have + # the index attribute set to `None`, default it to `0` + second_device = torch.device("cuda", index=0) + + return first_device == second_device + + +def convert_file_size_to_int(size: Union[int, str]): + """ + Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). + + Args: + size (`int` or `str`): The size to convert. Will be directly returned if an `int`. + + Example: + + ```py + >>> convert_file_size_to_int("1MiB") + 1048576 + ``` + """ + mem_size = -1 + err_msg = ( + f"`size` {size} is not in a valid format. Use an integer for bytes, or a string with an unit (like '5.0GB')." + ) + try: + if isinstance(size, int): + mem_size = size + elif size.upper().endswith("GIB"): + mem_size = int(float(size[:-3]) * (2**30)) + elif size.upper().endswith("MIB"): + mem_size = int(float(size[:-3]) * (2**20)) + elif size.upper().endswith("KIB"): + mem_size = int(float(size[:-3]) * (2**10)) + elif size.upper().endswith("GB"): + int_size = int(float(size[:-2]) * (10**9)) + mem_size = int_size // 8 if size.endswith("b") else int_size + elif size.upper().endswith("MB"): + int_size = int(float(size[:-2]) * (10**6)) + mem_size = int_size // 8 if size.endswith("b") else int_size + elif size.upper().endswith("KB"): + int_size = int(float(size[:-2]) * (10**3)) + mem_size = int_size // 8 if size.endswith("b") else int_size + except ValueError: + raise ValueError(err_msg) + + if mem_size < 0: + raise ValueError(err_msg) + return mem_size + + +def dtype_byte_size(dtype: torch.dtype): + """ + Returns the size (in bytes) occupied by one parameter of type `dtype`. + + Example: + + ```py + >>> dtype_byte_size(torch.float32) + 4 + ``` + """ + if dtype == torch.bool: + return 1 / 8 + elif dtype == CustomDtype.INT2: + return 1 / 4 + elif dtype == CustomDtype.INT4: + return 1 / 2 + elif dtype == CustomDtype.FP8: + return 1 + bit_search = re.search(r"[^\d](\d+)$", str(dtype)) + if bit_search is None: + raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") + bit_size = int(bit_search.groups()[0]) + return bit_size // 8 + + +def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: + """ + Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For + example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is + guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with + non-overlapping lifetimes may have the same id. 
+ """ + _SIZE = { + torch.int64: 8, + torch.float32: 4, + torch.int32: 4, + torch.bfloat16: 2, + torch.float16: 2, + torch.int16: 2, + torch.uint8: 1, + torch.int8: 1, + torch.bool: 1, + torch.float64: 8, + } + try: + storage_ptr = tensor.untyped_storage().data_ptr() + storage_size = tensor.untyped_storage().nbytes() + except Exception: + # Fallback for torch==1.10 + try: + storage_ptr = tensor.storage().data_ptr() + storage_size = tensor.storage().size() * _SIZE[tensor.dtype] + except NotImplementedError: + # Fallback for meta storage + storage_ptr = 0 + # On torch >=2.0 this is the tensor size + storage_size = tensor.nelement() * _SIZE[tensor.dtype] + + return tensor.device, storage_ptr, storage_size + + +def shard_checkpoint( + state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME +): + """ + Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a + given size. + + The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no + optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the + limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], + [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will + have a size greater than `max_shard_size`. + + + + Args: + state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit + (like `"5MB"`). + weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): + The name of the model save file. + """ + max_shard_size = convert_file_size_to_int(max_shard_size) + + sharded_state_dicts = [{}] + last_block_size = 0 + total_size = 0 + storage_id_to_block = {} + + for key, weight in state_dict.items(): + # when bnb serialization is used the weights in the state dict can be strings + # check: https://github.com/huggingface/transformers/pull/24416 for more details + if isinstance(weight, str): + continue + else: + storage_id = id_tensor_storage(weight) + + # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block` + if storage_id in storage_id_to_block: + block_id = storage_id_to_block[storage_id] + sharded_state_dicts[block_id][key] = weight + continue + + weight_size = weight.numel() * dtype_byte_size(weight.dtype) + + # If this weight is going to tip up over the maximal size, we split. 
+ if last_block_size + weight_size > max_shard_size: + sharded_state_dicts.append({}) + last_block_size = 0 + + sharded_state_dicts[-1][key] = weight + last_block_size += weight_size + total_size += weight_size + storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1 + + # If we only have one shard, we return it + if len(sharded_state_dicts) == 1: + return {weights_name: sharded_state_dicts[0]}, None + + # Otherwise, let's build the index + weight_map = {} + shards = {} + for idx, shard in enumerate(sharded_state_dicts): + shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin") + shard_file = shard_file.replace( + ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" + ) + shards[shard_file] = shard + for key in shard.keys(): + weight_map[key] = shard_file + + # Add the metadata + metadata = {"total_size": total_size} + index = {"metadata": metadata, "weight_map": weight_map} + return shards, index + + +def set_module_tensor_to_device( + module: nn.Module, + tensor_name: str, + device: Union[int, str, torch.device], + value: Optional[torch.Tensor] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + fp16_statistics: Optional[torch.HalfTensor] = None, + tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, +): + """ + A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing + `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). + + Args: + module (`torch.nn.Module`): + The module in which the tensor we want to move lives. + tensor_name (`str`): + The full name of the parameter/buffer. + device (`int`, `str` or `torch.device`): + The device on which to set the tensor. + value (`torch.Tensor`, *optional*): + The value of the tensor (useful when going from the meta device to any other device). + dtype (`torch.dtype`, *optional*): + If passed along the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to + the dtype of the existing parameter in the model. + fp16_statistics (`torch.HalfTensor`, *optional*): + The list of fp16 statistics to set on the module, used for 8 bit model serialization. + tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`): + A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given + execution device, this parameter is useful to reuse the first available pointer of a shared weight on the + device for all others, instead of duplicating memory. + """ + # Recurse if needed + if "." in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + + if tensor_name not in module._parameters and tensor_name not in module._buffers: + raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") + is_buffer = tensor_name in module._buffers + old_value = getattr(module, tensor_name) + + # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight + # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer. 
+ if ( + value is not None + and tied_params_map is not None + and value.data_ptr() in tied_params_map + and device in tied_params_map[value.data_ptr()] + ): + module._parameters[tensor_name] = tied_params_map[value.data_ptr()][device] + return + elif ( + tied_params_map is not None + and old_value.data_ptr() in tied_params_map + and device in tied_params_map[old_value.data_ptr()] + ): + module._parameters[tensor_name] = tied_params_map[old_value.data_ptr()][device] + return + + if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: + raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") + + if value is not None: + if old_value.shape != value.shape: + raise ValueError( + f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this look incorrect.' + ) + + if dtype is None: + # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model + value = value.to(old_value.dtype) + elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): + value = value.to(dtype) + + param = module._parameters[tensor_name] if tensor_name in module._parameters else None + param_cls = type(param) + + device_quantization = None + with torch.no_grad(): + # leave it on cpu first before moving them to cuda + # # fix the case where the device is meta, we don't want to put it on cpu because there is no data =0 + if ( + param is not None + and param.device.type != "cuda" + and torch.device(device).type == "cuda" + and param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"] + ): + device_quantization = device + device = "cpu" + # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). 
+ if is_npu_available() and isinstance(device, int): + device = f"npu:{device}" + elif is_mlu_available() and isinstance(device, int): + device = f"mlu:{device}" + if is_xpu_available() and isinstance(device, int): + device = f"xpu:{device}" + if value is None: + new_value = old_value.to(device) + if dtype is not None and device in ["meta", torch.device("meta")]: + if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): + new_value = new_value.to(dtype) + + if not is_buffer: + module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad) + elif isinstance(value, torch.Tensor): + new_value = value.to(device) + else: + new_value = torch.tensor(value, device=device) + if device_quantization is not None: + device = device_quantization + if is_buffer: + module._buffers[tensor_name] = new_value + elif value is not None or not check_device_same(torch.device(device), module._parameters[tensor_name].device): + param_cls = type(module._parameters[tensor_name]) + kwargs = module._parameters[tensor_name].__dict__ + if param_cls.__name__ in ["Int8Params", "FP4Params"]: + if param_cls.__name__ == "Int8Params" and new_value.dtype == torch.float32: + # downcast to fp16 if any - needed for 8bit serialization + new_value = new_value.to(torch.float16) + # quantize module that are going to stay on the cpu so that we offload quantized weights + if device == "cpu" and param_cls.__name__ == "Int8Params": + new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(0).to("cpu") + new_value.CB = new_value.CB.to("cpu") + new_value.SCB = new_value.SCB.to("cpu") + else: + new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device) + elif param_cls.__name__ in ["QTensor", "QBitsTensor"]: + new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad).to(device) + else: + new_value = param_cls(new_value, requires_grad=old_value.requires_grad).to(device) + + module._parameters[tensor_name] = new_value + if fp16_statistics is not None: + module._parameters[tensor_name].SCB = fp16_statistics.to(device) + del fp16_statistics + # as we put the weight to meta, it doesn't have SCB attr anymore. 
make sure that it is not a meta weight + if ( + module.__class__.__name__ == "Linear8bitLt" + and getattr(module.weight, "SCB", None) is None + and str(module.weight.device) != "meta" + ): + # quantize only if necessary + device_index = torch.device(device).index if torch.device(device).type == "cuda" else None + if not getattr(module.weight, "SCB", None) and device_index is not None: + if module.bias is not None and module.bias.device.type != "meta": + # if a bias exists, we need to wait until the bias is set on the correct device + module = module.cuda(device_index) + elif module.bias is None: + # if no bias exists, we can quantize right away + module = module.cuda(device_index) + elif module.__class__.__name__ == "Linear4bit" and getattr(module.weight, "quant_state", None) is None: + # quantize only if necessary + device_index = torch.device(device).index if torch.device(device).type == "cuda" else None + if not getattr(module.weight, "quant_state", None) and device_index is not None: + module.weight = module.weight.cuda(device_index) + # clean pre and post foward hook + if is_npu_available(): + torch.npu.empty_cache() + elif is_mlu_available(): + torch.mlu.empty_cache() + elif is_xpu_available(): + torch.xpu.empty_cache() + else: + torch.cuda.empty_cache() + + # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been allocated on the device in + # order to avoid duplicating memory, see above. + if ( + tied_params_map is not None + and old_value.data_ptr() in tied_params_map + and device not in tied_params_map[old_value.data_ptr()] + ): + tied_params_map[old_value.data_ptr()][device] = new_value + elif ( + value is not None + and tied_params_map is not None + and value.data_ptr() in tied_params_map + and device not in tied_params_map[value.data_ptr()] + ): + tied_params_map[value.data_ptr()][device] = new_value + + +def named_module_tensors( + module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False +): + """ + A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True` + it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`. + + Args: + module (`torch.nn.Module`): + The module we want the tensors on. + include_buffer (`bool`, *optional*, defaults to `True`): + Whether or not to include the buffers in the result. + recurse (`bool`, *optional`, defaults to `False`): + Whether or not to go look in every submodule or just return the direct parameters and buffers. + remove_non_persistent (`bool`, *optional*, defaults to `False`): + Whether or not to remove the non persistent buffer from the buffers. Useful only when include_buffers = + True + """ + yield from module.named_parameters(recurse=recurse) + + if include_buffers: + non_persistent_buffers = set() + if remove_non_persistent: + non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse) + for named_buffer in module.named_buffers(recurse=recurse): + name, _ = named_buffer + if name not in non_persistent_buffers: + yield named_buffer + + +def get_non_persistent_buffers(module: nn.Module, recurse: bool = False): + """ + Gather all non persistent buffers of a given modules into a set + + Args: + module (`nn.Module`): + The module we want the non persistent buffers on. 
+ recurse (`bool`, *optional*, defaults to `False`): + Whether or not to go look in every submodule or just return the direct non persistent buffers. + """ + + non_persistent_buffers_set = module._non_persistent_buffers_set + if recurse: + for _, m in module.named_modules(): + non_persistent_buffers_set |= m._non_persistent_buffers_set + + return non_persistent_buffers_set + + +class FindTiedParametersResult(list): + """ + This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not + a list or on the `values` method as in the future this will be removed. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def values(self): + # TODO: at the next Transformers release (4.28.0) issue a deprecation warning here. + return sum([x[1:] for x in self], []) + + +def check_tied_parameters_in_config(model: nn.Module): + """ + Check if there is any indication in the given model that some weights should be tied. + + Args: + model (`torch.nn.Module`): The model to inspect + + Returns: + bool: True if the model needs to have tied weights + """ + + # based on model.tie_weights() method + has_tied_word_embedding = False + has_tied_encoder_decoder = False + has_tied_module = False + + if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]: + has_tied_word_embedding = ( + hasattr(model, "config") + and getattr(model.config, "tie_word_embeddings", False) + and model.get_output_embeddings() + ) + has_tied_encoder_decoder = ( + hasattr(model, "config") + and getattr(model.config, "is_encoder_decoder", False) + and getattr(model.config, "tie_encoder_decoder", False) + ) + has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules()) + + return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module]) + + +def _get_param_device(param, device_map): + if param in device_map: + return device_map[param] + parent_param = ".".join(param.split(".")[:-1]) + if parent_param == param: + raise ValueError(f"The `device_map` does not contain the module {param}.") + else: + return _get_param_device(parent_param, device_map) + + +def check_tied_parameters_on_same_device(tied_params, device_map): + """ + Check if tied parameters are on the same device + + Args: + tied_params (`List[List[str]]`): + A list of lists of parameter names being all tied together. + + device_map (`Dict[str, Union[int, str, torch.device]]`): + A map that specifies where each submodule should go. + + """ + for tie_param in tied_params: + tie_param_devices = {} + for param in tie_param: + tie_param_devices[param] = _get_param_device(param, device_map) + if len(set(tie_param_devices.values())) > 1: + logger.warn( + f"Tied parameters are on different devices: {tie_param_devices}. " + "Please modify your custom device map or set `device_map='auto'`. " + ) + + +def find_tied_parameters(model: nn.Module, **kwargs): + """ + Find the tied parameters in a given model. + + + + The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore + them. + + + + Args: + model (`torch.nn.Module`): The model to inspect. + + Returns: + List[List[str]]: A list of lists of parameter names being all tied together. 
+ + Example: + + ```py + >>> from collections import OrderedDict + >>> import torch.nn as nn + + >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))])) + >>> model.linear2.weight = model.linear1.weight + >>> find_tied_parameters(model) + [['linear1.weight', 'linear2.weight']] + ``` + """ + # Initialize result and named_parameters before recursing. + named_parameters = kwargs.get("named_parameters", None) + prefix = kwargs.get("prefix", "") + result = kwargs.get("result", {}) + + if named_parameters is None: + named_parameters = {n: p for n, p in model.named_parameters()} + else: + # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters` + # of the submodule it belongs to. So while recursing we track the names that are not in the initial + # `named_parameters`. + for name, parameter in model.named_parameters(): + full_name = name if prefix == "" else f"{prefix}.{name}" + if full_name not in named_parameters: + # When we find one, it has to be one of the existing parameters. + for new_name, new_param in named_parameters.items(): + if new_param is parameter: + if new_name not in result: + result[new_name] = [] + result[new_name].append(full_name) + + # Once we have treated direct parameters, we move to the child modules. + for name, child in model.named_children(): + child_name = name if prefix == "" else f"{prefix}.{name}" + find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result) + + return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()]) + + +def retie_parameters(model, tied_params): + """ + Reties tied parameters in a given model if the link was broken (for instance when adding hooks). + + Args: + model (`torch.nn.Module`): + The model in which to retie parameters. + tied_params (`List[List[str]]`): + A mapping parameter name to tied parameter name as obtained by `find_tied_parameters`. + """ + for tied_group in tied_params: + param_to_tie = None + # two loops : the first one to set param_to_tie , the second one to change the values of tied_group + for param_name in tied_group: + module = model + splits = param_name.split(".") + for split in splits[:-1]: + module = getattr(module, split) + param = getattr(module, splits[-1]) + if param_to_tie is None and param.device != torch.device("meta"): + param_to_tie = param + break + if param_to_tie is not None: + for param_name in tied_group: + module = model + splits = param_name.split(".") + for split in splits[:-1]: + module = getattr(module, split) + setattr(module, splits[-1], param_to_tie) + + +def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype: + """ + Just does torch.dtype(dtype) if necessary. + """ + if isinstance(dtype, str): + # We accept "torch.float16" or just "float16" + dtype = dtype.replace("torch.", "") + dtype = getattr(torch, dtype) + return dtype + + +def compute_module_sizes( + model: nn.Module, + dtype: Optional[Union[str, torch.device]] = None, + special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, + buffers_only: bool = False, +): + """ + Compute the size of each submodule of a given model. 
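+ The returned dictionary maps every prefix of each parameter/buffer name to a size in bytes; the empty-string key holds the total size of the model.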
+ """ + if dtype is not None: + dtype = _get_proper_dtype(dtype) + dtype_size = dtype_byte_size(dtype) + if special_dtypes is not None: + special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} + special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} + module_sizes = defaultdict(int) + + module_list = [] + + if not buffers_only: + module_list = named_module_tensors(model, recurse=True) + else: + module_list = model.named_buffers(recurse=True) + + for name, tensor in module_list: + if special_dtypes is not None and name in special_dtypes: + size = tensor.numel() * special_dtypes_size[name] + elif dtype is None: + size = tensor.numel() * dtype_byte_size(tensor.dtype) + elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): + # According to the code in set_module_tensor_to_device, these types won't be converted + # so use their original size here + size = tensor.numel() * dtype_byte_size(tensor.dtype) + else: + size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) + name_parts = name.split(".") + for idx in range(len(name_parts) + 1): + module_sizes[".".join(name_parts[:idx])] += size + + return module_sizes + + +def compute_module_total_buffer_size( + model: nn.Module, + dtype: Optional[Union[str, torch.device]] = None, + special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, +): + """ + Compute the total size of buffers in each submodule of a given model. + """ + module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes, buffers_only=True) + return module_sizes.get("", 0) + + +def get_max_layer_size( + modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str] +): + """ + Utility function that will scan a list of named modules and return the maximum size used by one full layer. The + definition of a layer being: + - a module with no direct children (just parameters and buffers) + - a module whose class name is in the list `no_split_module_classes` + + Args: + modules (`List[Tuple[str, torch.nn.Module]]`): + The list of named modules where we want to determine the maximum layer size. + module_sizes (`Dict[str, int]`): + A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`). + no_split_module_classes (`List[str]`): + A list of class names for layers we don't want to be split. + + Returns: + `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size. + """ + max_size = 0 + layer_names = [] + modules_to_treat = modules.copy() + while len(modules_to_treat) > 0: + module_name, module = modules_to_treat.pop(0) + modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else [] + if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: + # No splitting this one so we compare to the max_size + size = module_sizes[module_name] + if size > max_size: + max_size = size + layer_names = [module_name] + elif size == max_size: + layer_names.append(module_name) + else: + modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat + return max_size, layer_names + + +def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None): + """ + Get the maximum memory available if nothing is passed, converts string to int otherwise. 
+ """ + import psutil + + if max_memory is None: + if not (torch.cuda.is_available() or is_npu_available() or is_mlu_available() or is_xpu_available()): + max_memory = {} + + else: + # Make sure CUDA is initialized on each GPU to have the right memory info. + if is_npu_available(): + for i in range(torch.npu.device_count()): + _ = torch.tensor(0, device=torch.device("npu", i)) + max_memory = {i: torch.npu.mem_get_info(i)[0] for i in range(torch.npu.device_count())} + elif is_mlu_available(): + for i in range(torch.mlu.device_count()): + _ = torch.tensor(0, device=torch.device("mlu", i)) + max_memory = {i: torch.mlu.mem_get_info(i)[0] for i in range(torch.mlu.device_count())} + elif is_xpu_available(): + for i in range(torch.xpu.device_count()): + _ = torch.tensor(0, device=torch.device("xpu", i)) + max_memory = {i: torch.xpu.max_memory_allocated(i) for i in range(torch.xpu.device_count())} + else: + for i in range(torch.cuda.device_count()): + _ = torch.tensor([0], device=i) + max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())} + # allocate everything in the mps device as the RAM is shared + if is_mps_available(): + max_memory["mps"] = psutil.virtual_memory().available + else: + max_memory["cpu"] = psutil.virtual_memory().available + return max_memory + + for key in max_memory: + if isinstance(max_memory[key], str): + max_memory[key] = convert_file_size_to_int(max_memory[key]) + + # Need to sort the device by type to make sure that we allocate the gpu first. + # As gpu/npu/xpu are represented by int, we need to sort them first. + gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)] + gpu_devices.sort() + # check if gpu/npu/xpu devices are available and if not, throw a warning + if is_npu_available(): + num_devices = torch.npu.device_count() + elif is_mlu_available(): + num_devices = torch.mlu.device_count() + elif is_xpu_available(): + num_devices = torch.xpu.device_count() + else: + num_devices = torch.cuda.device_count() + for device in gpu_devices: + if device >= num_devices or device < 0: + logger.warning(f"Device {device} is not available, available devices are {list(range(num_devices))}") + # Add the other devices in the preset order if they are available + all_devices = gpu_devices + [k for k in ["mps", "cpu", "disk"] if k in max_memory.keys()] + # Raise an error if a device is not recognized + for k in max_memory.keys(): + if k not in all_devices: + raise ValueError( + f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'" + ) + max_memory = {k: max_memory[k] for k in all_devices} + + return max_memory + + +def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""): + """ + Cleans a device_map by grouping all submodules that go on the same device together. + """ + # Get the value of the current module and if there is only one split across several keys, regroup it. + prefix = "" if module_name == "" else f"{module_name}." 
+ values = [v for k, v in device_map.items() if k.startswith(prefix)] + if len(set(values)) == 1 and len(values) > 1: + for k in [k for k in device_map if k.startswith(prefix)]: + del device_map[k] + device_map[module_name] = values[0] + + # Recurse over the children + children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)] + idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1 + children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules) + for child in children_modules: + clean_device_map(device_map, module_name=child) + + return device_map + + +def load_offloaded_weights(model, index, offload_folder): + """ + Loads the weights from the offload folder into the model. + + Args: + model (`torch.nn.Module`): + The model to load the weights into. + index (`dict`): + A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the + model. + offload_folder (`str`): + The folder where the offloaded weights are stored. + """ + if index is None or len(index) == 0: + # Nothing to do + return + for param_name, metadata in index.items(): + if "SCB" in param_name: + continue + fp16_statistics = None + if "weight" in param_name and param_name.replace("weight", "SCB") in index.keys(): + weight_name = param_name.replace("weight", "SCB") + fp16_statistics = load_offloaded_weight( + os.path.join(offload_folder, f"{weight_name}.dat"), index[weight_name] + ) + tensor_file = os.path.join(offload_folder, f"{param_name}.dat") + weight = load_offloaded_weight(tensor_file, metadata) + set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics) + + +def get_balanced_memory( + model: nn.Module, + max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, + no_split_module_classes: Optional[List[str]] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, + low_zero: bool = False, +): + """ + Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU. + + + + All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the + meta device (as it would if initialized within the `init_empty_weights` context manager). + + + + Args: + model (`torch.nn.Module`): + The model to analyze. + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. + Example: `max_memory={0: "1GB"}`. + no_split_module_classes (`List[str]`, *optional*): + A list of layer class names that should never be split across device (for instance any layer that has a + residual connection). + dtype (`str` or `torch.dtype`, *optional*): + If provided, the weights will be converted to that type when loaded. + special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): + If provided, special dtypes to consider for some specific weights (will override dtype used as default for + all weights). + low_zero (`bool`, *optional*): + Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the + Transformers generate function). 
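+ + Example (a minimal sketch on a toy model; the memory figures are illustrative): + + ```py + >>> import torch.nn as nn + + >>> model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4)) + >>> max_memory = get_balanced_memory(model, max_memory={0: "1GB", 1: "1GB"}) + >>> device_map = infer_auto_device_map(model, max_memory=max_memory) + ```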
+ """ + # Get default / clean up max_memory + user_not_set_max_memory = max_memory is None + max_memory = get_max_memory(max_memory) + + if is_npu_available(): + num_devices = len([d for d in max_memory if torch.device(d).type == "npu" and max_memory[d] > 0]) + elif is_mlu_available(): + num_devices = len([d for d in max_memory if torch.device(d).type == "mlu" and max_memory[d] > 0]) + elif is_xpu_available(): + num_devices = len( + [ + d + for d in max_memory + if ( + d != "cpu" + and (torch.device(d).type == "xpu" or torch.xpu.get_device_properties(d).dev_type == "gpu") + ) + and max_memory[d] > 0 + ] + ) + else: + num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0]) + + if num_devices == 0: + return max_memory + + if num_devices == 1: + # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer + low_zero = False + # If user just asked us to handle memory usage, we should avoid OOM + if user_not_set_max_memory: + for key in max_memory.keys(): + if isinstance(key, int): + max_memory[key] *= 0.9 # 90% is a good compromise + logger.info( + f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. " + "You can set `max_memory` in to a higher value to use more memory (at your own risk)." + ) + break # only one device + + module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) + per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices) + + # We can't just set the memory to model_size // num_devices as it will end being too small: each GPU will get + # slightly less layers and some layers will end up offload at the end. So this function computes a buffer size to + # add which is the biggest of: + # - the size of no split block (if applicable) + # - the mean of the layer sizes + if no_split_module_classes is None: + no_split_module_classes = [] + elif not isinstance(no_split_module_classes, (list, tuple)): + no_split_module_classes = [no_split_module_classes] + + # Identify the size of the no_split_block modules + if len(no_split_module_classes) > 0: + no_split_children = {} + for name, size in module_sizes.items(): + if name == "": + continue + submodule = model + for submodule_name in name.split("."): + submodule = getattr(submodule, submodule_name) + class_name = submodule.__class__.__name__ + if class_name in no_split_module_classes and class_name not in no_split_children: + no_split_children[class_name] = size + + if set(no_split_children.keys()) == set(no_split_module_classes): + break + buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0 + else: + buffer = 0 + + # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters + leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0] + module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves} + # Once removed, leaves are the final modules. 
+ leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0] + mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1)) + buffer = int(1.25 * max(buffer, mean_leaves)) + per_gpu += buffer + + # Sorted list of GPUs id (we may have some gpu ids not included in the our max_memory list - let's ignore them) + gpus_idx_list = list( + sorted( + device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0 + ) + ) + # The last device is left with max_memory just in case the buffer is not enough. + for idx in gpus_idx_list[:-1]: + max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx]) + + if low_zero: + min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)])) + max_memory[0] = min(min_zero, max_memory[0]) + + return max_memory + + +def calculate_maximum_sizes(model: torch.nn.Module): + "Computes the total size of the model and its largest layer" + sizes = compute_module_sizes(model) + # `transformers` models store this information for us + no_split_modules = getattr(model, "_no_split_modules", None) + if no_split_modules is None: + no_split_modules = [] + + modules_to_treat = ( + list(model.named_parameters(recurse=False)) + + list(model.named_children()) + + list(model.named_buffers(recurse=False)) + ) + largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules) + total_size = sizes[""] + return total_size, largest_layer + + +def infer_auto_device_map( + model: nn.Module, + max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, + no_split_module_classes: Optional[List[str]] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None, + verbose: bool = False, + clean_result: bool = True, + offload_buffers: bool = False, +): + """ + Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk, + such that: + - we don't exceed the memory available of any of the GPU. + - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that + has the largest size. + - if offload to the CPU is needed,we don't exceed the RAM available on the CPU. + - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk + that has the largest size. + + + + All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the + meta device (as it would if initialized within the `init_empty_weights` context manager). + + + + Args: + model (`torch.nn.Module`): + The model to analyze. + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. + Example: `max_memory={0: "1GB"}`. + no_split_module_classes (`List[str]`, *optional*): + A list of layer class names that should never be split across device (for instance any layer that has a + residual connection). + dtype (`str` or `torch.dtype`, *optional*): + If provided, the weights will be converted to that type when loaded. + special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): + If provided, special dtypes to consider for some specific weights (will override dtype used as default for + all weights). 
+ verbose (`bool`, *optional*, defaults to `False`): + Whether or not to provide debugging statements as the function builds the device_map. + clean_result (`bool`, *optional*, defaults to `True`): + Clean the resulting device_map by grouping all submodules that go on the same device together. + offload_buffers (`bool`, *optional*, defaults to `False`): + In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as + well as the parameters. + """ + # Get default / clean up max_memory + max_memory = get_max_memory(max_memory) + if no_split_module_classes is None: + no_split_module_classes = [] + elif not isinstance(no_split_module_classes, (list, tuple)): + no_split_module_classes = [no_split_module_classes] + + devices = list(max_memory.keys()) + if "disk" not in devices: + devices.append("disk") + gpus = [device for device in devices if device not in ["cpu", "disk"]] + + # Devices that need to keep space for a potential offloaded layer. + if "mps" in gpus: + main_devices = ["mps"] + elif len(gpus) > 0: + main_devices = [gpus[0], "cpu"] + else: + main_devices = ["cpu"] + + module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) + tied_parameters = find_tied_parameters(model) + + if check_tied_parameters_in_config(model) and len(tied_parameters) == 0: + logger.warn( + "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." + ) + + device_map = OrderedDict() + current_device = 0 + current_memory_used = 0 + device_memory_used = {} + device_buffer_sizes = {} + + # Direct submodules and parameters + modules_to_treat = ( + list(model.named_parameters(recurse=False)) + + list(model.named_children()) + + list(model.named_buffers(recurse=False)) + ) + # Initialize maximum largest layer, to know which space to keep in memory + max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes) + + # Ready ? This is going to be a bit messy. + while len(modules_to_treat) > 0: + name, module = modules_to_treat.pop(0) + if verbose: + print(f"\nTreating module {name}.") + # Max size in the remaining layers may have changed since we took one, so we maybe update it. + max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")] + if len(max_layer_names) == 0: + max_layer_size, max_layer_names = get_max_layer_size( + [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], + module_sizes, + no_split_module_classes, + ) + # Assess size needed + module_size = module_sizes[name] + + # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module + # and the other is not. + # Note: If we are currently processing the name `compute.weight`, an other parameter named e.g. `compute.weight_submodule.parameter` + # needs to be considered outside the current module, hence the check with additional dots. + tied_param_goups = [ + tied_group + for tied_group in tied_parameters + if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group) + ] + + if verbose and len(tied_param_goups) > 0: + print(f" Found the relevant tied param groups {tied_param_goups}") + + # Then we keep track of all the parameters that are tied to the current module, but not in the current module + tied_params = sum( + [[p for p in tied_group if name + "." 
not in p + "."] for tied_group in tied_param_goups], [] + ) + + if verbose and len(tied_params) > 0: + print(f" So those parameters need to be taken into account {tied_params}") + + device = devices[current_device] + current_max_size = max_memory[device] if device != "disk" else None + current_memory_reserved = 0 + # Reduce max size available by the largest layer. + if devices[current_device] in main_devices: + current_max_size = current_max_size - max_layer_size + current_memory_reserved = max_layer_size + # Case 1 -> We're too big! + if current_max_size is not None and current_memory_used + module_size > current_max_size: + # Split or not split? + modules_children = ( + [] + if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor) + else list(module.named_children()) + ) + if verbose: + print( + f"Not enough space on {devices[current_device]} to put {name} (space available " + f"{current_max_size - current_memory_used}, module size {module_size})." + ) + if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: + # -> no split, we go to the next device + if verbose: + print("This module cannot be split, going to the next device.") + + device_memory_used[device] = current_memory_used + current_memory_reserved + current_device += 1 + modules_to_treat = [(name, module)] + modules_to_treat + current_memory_used = 0 + else: + # -> split, we replace the module studied by its children + parameters + if verbose: + print(f"Splitting {name}.") + modules_children = list(module.named_parameters(recurse=False)) + modules_children + modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat + # Update the max layer size. + max_layer_size, max_layer_names = get_max_layer_size( + [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], + module_sizes, + no_split_module_classes, + ) + + # Case 2, it fits! We're not entirely out of the wood though, because we may have some tied parameters. + elif len(tied_params) > 0: + # First locate all tied modules + tied_module_names = [] + tied_modules = [] + for tied_param in tied_params: + tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0] + tied_module_names.append(modules_to_treat[tied_module_index][0]) + tied_modules.append(modules_to_treat[tied_module_index][1]) + if verbose: + print( + f" It looks like {name} is going to fit on {devices[current_device]} but we have tied " + f"parameters to account for.\n - Names {tied_params}\n - Module names {tied_module_names}" + ) + + # Let's see if it all fits first + module_size_with_ties = module_size + for tied_param, tied_module_name in zip(tied_params, tied_module_names): + module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param] + + if current_max_size is None or current_memory_used + module_size_with_ties <= current_max_size: + # We really really fit! + if verbose: + print(f"Putting {name} and {tied_module_names} on {devices[current_device]}.") + current_memory_used += module_size_with_ties + device_map[name] = devices[current_device] + for tied_module_name in tied_module_names: + if tied_module_name in [m[0] for m in modules_to_treat]: + # The module may have been removed by a previous iteration of this loop. 
+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][ + 0 + ] + modules_to_treat.pop(tied_module_index) + device_map[tied_module_name] = devices[current_device] + + if not offload_buffers and isinstance(module, nn.Module): + current_buffer_size = compute_module_total_buffer_size( + module, dtype=dtype, special_dtypes=special_dtypes + ) + device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size + + else: + # We don't fit with the tied modules. Next question is: can we split one of the tied modules to make it + # smaller or do we need to go on the next device? + if verbose: + print( + f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space " + f"available {current_max_size - current_memory_used}, needed size {module_size_with_ties})." + ) + split_happened = False + for tied_module_name, tied_module in zip(tied_module_names, tied_modules): + tied_module_children = list(tied_module.named_children()) + if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes: + # can't break this one. + continue + + if verbose: + print(f"Splitting {tied_module_name}.") + tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children + tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children] + tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0] + + modules_to_treat = ( + [(name, module)] + + modules_to_treat[:tied_module_index] + + tied_module_children + + modules_to_treat[tied_module_index + 1 :] + ) + # Update the max layer size. + max_layer_size, max_layer_names = get_max_layer_size( + [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], + module_sizes, + no_split_module_classes, + ) + split_happened = True + break + + if not split_happened: + # If the tied module is not split, we go to the next device + if verbose: + print("None of the tied module can be split, going to the next device.") + + device_memory_used[device] = current_memory_used + current_memory_reserved + current_device += 1 + modules_to_treat = [(name, module)] + modules_to_treat + current_memory_used = 0 + + else: + if verbose: + if current_max_size is None: + print(f"Putting {name} (size={module_size}) on {devices[current_device]}.") + else: + print( + f"Putting {name} (size={module_size}) on {devices[current_device]} " + f"(available={current_max_size - current_memory_used})." 
+ ) + current_memory_used += module_size + device_memory_used[device] = current_memory_used + current_memory_reserved + device_map[name] = devices[current_device] + + if not offload_buffers and isinstance(module, nn.Module): + current_buffer_size = compute_module_total_buffer_size( + module, dtype=dtype, special_dtypes=special_dtypes + ) + device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size + + if clean_result: + device_map = clean_device_map(device_map) + + non_gpu_buffer_size = device_buffer_sizes.get("cpu", 0) + device_buffer_sizes.get("disk", 0) + if non_gpu_buffer_size > 0 and not offload_buffers: + is_buffer_fit_any_gpu = False + for gpu_device, gpu_max_memory in max_memory.items(): + if gpu_device == "cpu" or gpu_device == "disk": + continue + + if not is_buffer_fit_any_gpu: + gpu_memory_used = device_memory_used.get(gpu_device, 0) + + if gpu_max_memory >= non_gpu_buffer_size + gpu_memory_used: + is_buffer_fit_any_gpu = True + + if len(gpus) > 0 and not is_buffer_fit_any_gpu: + warnings.warn( + f"Current model requires {non_gpu_buffer_size} bytes of buffer for offloaded layers, which seems does " + f"not fit any GPU's remaining memory. If you are experiencing a OOM later, please consider using " + f"offload_buffers=True." + ) + + return device_map + + +def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]): + """ + Checks a device map covers everything in a given model. + + Args: + model (`torch.nn.Module`): The model to check the device map against. + device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check. + """ + all_model_tensors = [name for name, _ in model.state_dict().items()] + for module_name in device_map.keys(): + if module_name == "": + all_model_tensors.clear() + break + else: + all_model_tensors = [ + name + for name in all_model_tensors + if not name == module_name and not name.startswith(module_name + ".") + ] + if len(all_model_tensors) > 0: + non_covered_params = ", ".join(all_model_tensors) + raise ValueError( + f"The device_map provided does not give any device for the following parameters: {non_covered_params}" + ) + + +def load_state_dict(checkpoint_file, device_map=None): + """ + Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the + weights can be fast-loaded directly on the GPU. + + Args: + checkpoint_file (`str`): The path to the checkpoint to load. + device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer + name, once a given module name is inside, every submodule of it will be sent to the same device. + """ + if checkpoint_file.endswith(".safetensors"): + with safe_open(checkpoint_file, framework="pt") as f: + metadata = f.metadata() + weight_names = f.keys() + + if metadata is None: + logger.warn( + f"The safetensors archive passed at {checkpoint_file} does not contain metadata. " + "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata." + ) + metadata = {"format": "pt"} + + if metadata.get("format") not in ["pt", "tf", "flax"]: + raise OSError( + f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " + "you save your model with the `save_pretrained` method." 
+ ) + elif metadata["format"] != "pt": + raise ValueError(f"The checkpoint passed was saved with {metadata['format']}, we need a the pt format.") + if device_map is None: + return safe_load_file(checkpoint_file) + else: + # if we only have one device we can load everything directly + if len(set(device_map.values())) == 1: + return safe_load_file(checkpoint_file, device=list(device_map.values())[0]) + + devices = list(set(device_map.values()) - {"disk"}) + # cpu device should always exist as fallback option + if "cpu" not in devices: + devices.append("cpu") + + # For each device, get the weights that go there + device_weights = {device: [] for device in devices} + for module_name, device in device_map.items(): + if device in devices: + device_weights[device].extend( + [k for k in weight_names if k == module_name or k.startswith(module_name + ".")] + ) + + # all weights that haven't defined a device should be loaded on CPU + device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])]) + tensors = {} + if is_tqdm_available(): + progress_bar = tqdm( + main_process_only=False, + total=sum([len(device_weights[device]) for device in devices]), + unit="w", + smoothing=0, + leave=False, + ) + else: + progress_bar = None + for device in devices: + target_device = device + + if is_xpu_available(): + current_safetensors_version = packaging.version.parse(importlib.metadata.version("safetensors")) + + if compare_versions(current_safetensors_version, "<", "0.4.2"): + raise ModuleNotFoundError( + f"You need at least safetensors 0.4.2 for Intel GPU, while you have {current_safetensors_version}" + ) + + if isinstance(device, int): + target_device = f"xpu:{device}" + + with safe_open(checkpoint_file, framework="pt", device=target_device) as f: + for key in device_weights[device]: + if progress_bar is not None: + progress_bar.set_postfix(dev=device, refresh=False) + progress_bar.set_description(key) + tensors[key] = f.get_tensor(key) + if progress_bar is not None: + progress_bar.update() + if progress_bar is not None: + progress_bar.close() + + return tensors + else: + return torch.load(checkpoint_file, map_location=torch.device("cpu")) + + +def get_state_dict_offloaded_model(model: nn.Module): + """ + Returns the state dictionary for an offloaded model via iterative onloading + + Args: + model (`torch.nn.Module`): + The offloaded model we want to save + """ + from ..hooks import AlignDevicesHook + + state_dict = {} + placeholders = set() + for name, module in model.named_modules(): + if name == "": + continue + if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload: + original_device = module._hf_hook.execution_device + # assign hook execution device to cpu + module._hf_hook.execution_device = "cpu" + # onload meta tensors to execution device + try: + module._hf_hook.pre_forward(module) + except MemoryError: + raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None + module_state_dict = module.state_dict() + # offload meta tensors from cpu + module._hf_hook.post_forward(module, torch.tensor([])) + # re-assign hook to original execution device + module._hf_hook.execution_device = original_device + else: + module_state_dict = module.state_dict() + + for key in module_state_dict: + # ignore placeholder parameters that are still on the meta device + if module_state_dict[key].device == torch.device("meta"): + placeholders.add(name + f".{key}") + continue + params = 
module_state_dict[key] + state_dict[name + f".{key}"] = params + for key in placeholders.copy(): + if key in state_dict: + placeholders.remove(key) + if placeholders: + logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}") + + return state_dict + + +def load_checkpoint_in_model( + model: nn.Module, + checkpoint: Union[str, os.PathLike], + device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + offload_state_dict: bool = False, + offload_buffers: bool = False, + keep_in_fp32_modules: List[str] = None, + offload_8bit_bnb: bool = False, + strict: bool = False, +): + """ + Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are + loaded. + + + + Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To + group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. + + + + Args: + model (`torch.nn.Module`): + The model in which we want to load a checkpoint. + checkpoint (`str` or `os.PathLike`): + The folder checkpoint to load. It can be: + - a path to a file containing a whole model state dict + - a path to a `.json` file containing the index to a sharded checkpoint + - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. + - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file. + device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer + name, once a given module name is inside, every submodule of it will be sent to the same device. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + dtype (`str` or `torch.dtype`, *optional*): + If provided, the weights will be converted to that type when loaded. + offload_state_dict (`bool`, *optional*, defaults to `False`): + If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if + the weight of the CPU state dict + the biggest shard does not fit. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the buffers in the weights offloaded to disk. + keep_in_fp32_modules(`List[str]`, *optional*): + A list of the modules that we keep in `torch.float32` dtype. + offload_8bit_bnb (`bool`, *optional*): + Whether or not to enable offload of 8-bit modules on cpu/disk. + strict (`bool`, *optional*, defaults to `False`): + Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's + state_dict. + + """ + if offload_8bit_bnb: + from .bnb import quantize_and_offload_8bit + + tied_params = find_tied_parameters(model) + + if check_tied_parameters_in_config(model) and len(tied_params) == 0: + logger.warn( + "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." + ) + if device_map is not None: + check_tied_parameters_on_same_device(tied_params, device_map) + + if offload_folder is None and device_map is not None and "disk" in device_map.values(): + raise ValueError( + "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`." 
+ ) + elif offload_folder is not None and device_map is not None and "disk" in device_map.values(): + os.makedirs(offload_folder, exist_ok=True) + + if isinstance(dtype, str): + # We accept "torch.float16" or just "float16" + dtype = dtype.replace("torch.", "") + dtype = getattr(torch, dtype) + + checkpoint_files = None + index_filename = None + if os.path.isfile(checkpoint): + if str(checkpoint).endswith(".json"): + index_filename = checkpoint + else: + checkpoint_files = [checkpoint] + elif os.path.isdir(checkpoint): + # check if the whole state dict is present + potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME] + potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME] + if len(potential_state_bin) == 1: + checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])] + elif len(potential_state_safetensor) == 1: + checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])] + else: + # otherwise check for sharded checkpoints + potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")] + if len(potential_index) == 0: + raise ValueError( + f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file" + ) + elif len(potential_index) == 1: + index_filename = os.path.join(checkpoint, potential_index[0]) + else: + raise ValueError( + f"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones." + ) + else: + raise ValueError( + "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded " + f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}." + ) + + if index_filename is not None: + checkpoint_folder = os.path.split(index_filename)[0] + with open(index_filename) as f: + index = json.loads(f.read()) + + if "weight_map" in index: + index = index["weight_map"] + checkpoint_files = sorted(list(set(index.values()))) + checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files] + + # Logic for missing/unexepected keys goes here. + + offload_index = {} + if offload_state_dict: + state_dict_folder = tempfile.mkdtemp() + state_dict_index = {} + + unexpected_keys = set() + model_keys = set(model.state_dict().keys()) + buffer_names = [name for name, _ in model.named_buffers()] + for checkpoint_file in checkpoint_files: + loaded_checkpoint = load_state_dict(checkpoint_file, device_map=device_map) + if device_map is None: + model.load_state_dict(loaded_checkpoint, strict=strict) + unexpected_keys.update(set(loaded_checkpoint.keys()) - model_keys) + else: + for param_name, param in loaded_checkpoint.items(): + # skip SCB parameter (for 8-bit serialization) + if "SCB" in param_name: + continue + + if param_name not in model_keys: + unexpected_keys.add(param_name) + if not strict: + continue # Skip loading this parameter. + + module_name = param_name + + while len(module_name) > 0 and module_name not in device_map: + module_name = ".".join(module_name.split(".")[:-1]) + if module_name == "" and "" not in device_map: + # TODO: group all errors and raise at the end. + raise ValueError(f"{param_name} doesn't have any device set.") + param_device = device_map[module_name] + new_dtype = dtype + if dtype is not None and torch.is_floating_point(param): + if keep_in_fp32_modules is not None and dtype == torch.float16: + proceed = False + for key in keep_in_fp32_modules: + if ((key in param_name) and (key + "." 
in param_name)) or key == param_name: + proceed = True + break + if proceed: + new_dtype = torch.float32 + + if "weight" in param_name and param_name.replace("weight", "SCB") in loaded_checkpoint.keys(): + if param.dtype == torch.int8: + fp16_statistics = loaded_checkpoint[param_name.replace("weight", "SCB")] + else: + fp16_statistics = None + + if param_device == "disk": + if offload_buffers or param_name not in buffer_names: + if new_dtype is None: + new_dtype = param.dtype + if offload_8bit_bnb: + quantize_and_offload_8bit( + model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics + ) + continue + else: + set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) + offload_weight(param, param_name, offload_folder, index=offload_index) + elif param_device == "cpu" and offload_state_dict: + if new_dtype is None: + new_dtype = param.dtype + if offload_8bit_bnb: + quantize_and_offload_8bit( + model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics + ) + else: + set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) + offload_weight(param, param_name, state_dict_folder, index=state_dict_index) + else: + set_module_tensor_to_device( + model, + param_name, + param_device, + value=param, + dtype=new_dtype, + fp16_statistics=fp16_statistics, + ) + + # Force Python to clean up. + del loaded_checkpoint + gc.collect() + + if not strict and len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint at {checkpoint} were not used when" + f" initializing {model.__class__.__name__}: {unexpected_keys}. This may or may not be an issue - make sure that the checkpoint does not have unnecessary parameters, or that the model definition correctly corresponds to the checkpoint." + ) + + save_offload_index(offload_index, offload_folder) + + # Load back offloaded state dict on CPU + if offload_state_dict: + load_offloaded_weights(model, state_dict_index, state_dict_folder) + shutil.rmtree(state_dict_folder) + + retie_parameters(model, tied_params) + + +def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None): + """ + Return a context manager for autocasting mixed precision + + Args: + native_amp (`bool`, *optional*, defaults to False): + Whether mixed precision is actually enabled. + cache_enabled (`bool`, *optional*, defaults to True): + Whether the weight cache inside autocast should be enabled. 
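+ + Returns a `torch.autocast` context manager configured for the active mixed precision, or `contextlib.nullcontext()` when `native_amp` is `False`.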
+ """ + state = AcceleratorState() + if autocast_kwargs is None: + autocast_kwargs = {} + else: + autocast_kwargs = autocast_kwargs.to_kwargs() + if native_amp: + device_type = ( + "cuda" + if (state.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_gpu=True)) + else state.device.type + ) + if state.mixed_precision == "fp16": + return torch.autocast(device_type=device_type, dtype=torch.float16, **autocast_kwargs) + elif state.mixed_precision == "bf16" and state.distributed_type in [ + DistributedType.NO, + DistributedType.MULTI_CPU, + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.FSDP, + DistributedType.XLA, + ]: + return torch.autocast(device_type=device_type, dtype=torch.bfloat16, **autocast_kwargs) + else: + return torch.autocast(device_type=device_type, **autocast_kwargs) + else: + return contextlib.nullcontext() diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/rich.py b/venv/lib/python3.10/site-packages/accelerate/utils/rich.py new file mode 100644 index 0000000000000000000000000000000000000000..2d48661b7fcef92ef1168b74cc275c6d3ccc67a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/rich.py @@ -0,0 +1,24 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .imports import is_rich_available + + +if is_rich_available(): + from rich.traceback import install + + install(show_locals=False) + +else: + raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`") diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/tqdm.py b/venv/lib/python3.10/site-packages/accelerate/utils/tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..940a8bb04aced0c898ba1926bacc4b60b72d6f54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/tqdm.py @@ -0,0 +1,37 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .imports import is_tqdm_available + + +if is_tqdm_available(): + from tqdm.auto import tqdm as _tqdm + +from ..state import PartialState + + +def tqdm(main_process_only: bool = True, *args, **kwargs): + """ + Wrapper around `tqdm.tqdm` that optionally displays only on the main process. 
+
+    Args:
+        main_process_only (`bool`, *optional*):
+            Whether to display the progress bar only on the main process
+    """
+    if not is_tqdm_available():
+        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
+    disable = False
+    if main_process_only:
+        disable = PartialState().local_process_index != 0
+    return _tqdm(*args, **kwargs, disable=disable)
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py b/venv/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..211a9f553ca22ac4938969416d07a9b139918b60
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py
@@ -0,0 +1,84 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch.nn as nn
+
+from .imports import is_fp8_available
+
+
+if is_fp8_available():
+    import transformer_engine.pytorch as te
+
+
+def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True):
+    """
+    Recursively converts the linear and layernorm layers of a model to their `transformer_engine` counterpart.
+    """
+    if not is_fp8_available():
+        raise ImportError("Using `convert_model` requires transformer_engine to be installed.")
+    for name, module in model.named_children():
+        if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear:
+            # Return early if the linear layer weights are not multiples of 16
+            if any(p % 16 != 0 for p in module.weight.shape):
+                return
+            has_bias = module.bias is not None
+            te_module = te.Linear(
+                module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
+            )
+            te_module.weight.copy_(module.weight)
+            if has_bias:
+                te_module.bias.copy_(module.bias)
+
+            setattr(model, name, te_module)
+        elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln:
+            te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
+            te_module.weight.copy_(module.weight)
+            te_module.bias.copy_(module.bias)
+
+            setattr(model, name, te_module)
+        elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear:
+            has_bias = module.bias is not None
+            new_module = nn.Linear(
+                module.in_features, module.out_features, bias=has_bias, dtype=module.weight.dtype
+            )
+            new_module.weight.copy_(module.weight)
+            if has_bias:
+                new_module.bias.copy_(module.bias)
+
+            setattr(model, name, new_module)
+        elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln:
+            new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, dtype=module.weight.dtype)
+            new_module.weight.copy_(module.weight)
+            new_module.bias.copy_(module.bias)
+
+            setattr(model, name, new_module)
+        else:
+            convert_model(
+                module,
+                to_transformer_engine=to_transformer_engine,
+                _convert_linear=_convert_linear,
+                _convert_ln=_convert_ln,
+            )
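Editorial aside, not part of the patched file: a minimal usage sketch for `convert_model` above. It assumes `transformer_engine` is installed and uses a hypothetical `TinyBlock` module whose feature sizes are multiples of 16, so the shape guard inside `convert_model` does not abort the swap; the call is wrapped in `torch.no_grad()` because the weight copies are in-place.

```python
import torch
import torch.nn as nn

from accelerate.utils.transformer_engine import convert_model


class TinyBlock(nn.Module):
    # Hypothetical toy module: every feature dimension is a multiple of 16,
    # so the `p % 16 != 0` check in convert_model lets the conversion proceed.
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(256, 1024)
        self.ln = nn.LayerNorm(1024)
        self.fc2 = nn.Linear(1024, 256)


model = TinyBlock()
with torch.no_grad():
    # Replaces nn.Linear / nn.LayerNorm children with their
    # transformer_engine.pytorch counterparts in place.
    convert_model(model, to_transformer_engine=True)
```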
+
+
+def has_transformer_engine_layers(model):
+    """
+    Returns whether a given model has some `transformer_engine` layer or not.
+    """
+    if not is_fp8_available():
+        raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.")
+    for m in model.modules():
+        if isinstance(m, (te.LayerNorm, te.Linear, te.TransformerLayer)):
+            return True
+    return False
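Another editorial sketch, not part of the patch: `has_transformer_engine_layers` works naturally as an idempotency guard so a model is only converted once. The helper name `maybe_convert_to_te` is hypothetical, and an environment with `transformer_engine` installed is assumed.

```python
import torch

from accelerate.utils.transformer_engine import convert_model, has_transformer_engine_layers


def maybe_convert_to_te(model):
    # Both helpers raise ImportError when transformer_engine is missing,
    # so only call this where it is installed.
    if not has_transformer_engine_layers(model):
        with torch.no_grad():
            convert_model(model, to_transformer_engine=True)
    return model
```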
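Finally, a hedged usage sketch for the `tqdm` wrapper added in `accelerate/utils/tqdm.py` earlier in this diff. With the signature shown there, `main_process_only` is the first positional parameter, so the iterable is passed after the flag; the loop body here is a placeholder.

```python
from accelerate.utils.tqdm import tqdm

# In a multi-process launch, every rank with local_process_index != 0 gets a
# disabled bar, so only the main process prints progress.
for step in tqdm(True, range(1000), desc="steps"):
    pass  # the real per-step work would go here
```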