diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..649a15888cccd070b3d4ca9a600457c6ad59d4d3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__init__.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from .config import config_command_parser
+from .config_args import default_config_file, load_config_from_file  # noqa: F401
+from .default import default_command_parser
+from .update import update_command_parser
+
+
+def get_config_parser(subparsers=None):
+    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
+    # The main config parser
+    config_parser = config_command_parser(subparsers)
+    # The subparser to add commands to
+    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
+
+    # Then add other parsers with the parent parser
+    default_command_parser(subcommands, parents=[parent_parser])
+    update_command_parser(subcommands, parents=[parent_parser])
+
+    return config_parser
+
+
+def main():
+    config_parser = get_config_parser()
+    args = config_parser.parse_args()
+
+    if not hasattr(args, "func"):
+        config_parser.print_help()
+        exit(1)
+
+    # Run
+    args.func(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/cluster.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad32c696cf48f5910acb2e2015b19c2a6dbc7123
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/cluster.py
@@ -0,0 +1,717 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import os + +from ...utils import ( + ComputeEnvironment, + DistributedType, + is_deepspeed_available, + is_mlu_available, + is_mps_available, + is_npu_available, + is_transformers_available, + is_xpu_available, +) +from ...utils.constants import ( + DEEPSPEED_MULTINODE_LAUNCHERS, + FSDP_AUTO_WRAP_POLICY, + FSDP_BACKWARD_PREFETCH, + FSDP_SHARDING_STRATEGY, + FSDP_STATE_DICT_TYPE, + TORCH_DYNAMO_MODES, +) +from .config_args import ClusterConfig +from .config_utils import ( + DYNAMO_BACKENDS, + _ask_field, + _ask_options, + _convert_distributed_mode, + _convert_dynamo_backend, + _convert_mixed_precision, + _convert_yes_no_to_bool, +) + + +def get_cluster_input(): + distributed_type = _ask_options( + "Which type of machine are you using?", + ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "TPU"], + _convert_distributed_mode, + ) + + machine_rank = 0 + num_machines = 1 + num_processes = 1 + gpu_ids = None + main_process_ip = None + main_process_port = None + rdzv_backend = "static" + same_network = True + debug = False + + if distributed_type in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_CPU, + ]: + num_machines = _ask_field( + "How many different machines will you use (use more than 1 for multi-node training)? [1]: ", + int, + default=1, + ) + if num_machines > 1: + machine_rank = _ask_options( + "What is the rank of this machine?", + list(range(num_machines)), + int, + ) + main_process_ip = _ask_field( + "What is the IP address of the machine that will host the main process? ", + ) + main_process_port = _ask_field( + "What is the port you will use to communicate with the main process? ", + int, + ) + same_network = _ask_field( + "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + if not same_network: + rdzv_backend = _ask_field( + "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static" + ) + debug = _ask_field( + "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if distributed_type == DistributedType.NO: + use_cpu = _ask_field( + "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + elif distributed_type == DistributedType.MULTI_CPU: + use_cpu = True + else: + use_cpu = False + + ipex_config = {} + mpirun_config = {} + if use_cpu: + ipex_config["ipex"] = _ask_field( + "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if distributed_type == DistributedType.MULTI_CPU: + use_mpirun = _ask_field( + "Do you want accelerate to launch mpirun? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_mpirun: + mpirun_hostfile = _ask_field( + "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ", + str, + default="~/hostfile", + ) + mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip()) + mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1) + if ( + not use_cpu + and is_xpu_available() + and distributed_type + not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA] + ): + ipex_config["use_xpu"] = _ask_field( + "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + dynamo_config = {} + use_dynamo = _ask_field( + "Do you wish to optimize your script with torch dynamo?[yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_dynamo: + prefix = "dynamo_" + dynamo_config[prefix + "backend"] = _ask_options( + "Which dynamo backend would you like to use?", + [x.lower() for x in DYNAMO_BACKENDS], + _convert_dynamo_backend, + default=2, + ) + use_custom_options = _ask_field( + "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if use_custom_options: + dynamo_config[prefix + "mode"] = _ask_options( + "Which mode do you want to use?", + TORCH_DYNAMO_MODES, + lambda x: TORCH_DYNAMO_MODES[int(x)], + default=0, + ) + dynamo_config[prefix + "use_fullgraph"] = _ask_field( + "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + dynamo_config[prefix + "use_dynamic"] = _ask_field( + "Do you want to enable dynamic shape tracing? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + use_mps = not use_cpu and is_mps_available() + deepspeed_config = {} + if ( + distributed_type + in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.NO, + ] + and not use_mps + ): + use_deepspeed = _ask_field( + "Do you want to use DeepSpeed? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_deepspeed: + distributed_type = DistributedType.DEEPSPEED + assert ( + is_deepspeed_available() + ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source" + + if distributed_type == DistributedType.DEEPSPEED: + use_deepspeed_config = _ask_field( + "Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_deepspeed_config: + deepspeed_config["deepspeed_config_file"] = _ask_field( + "Please enter the path to the json DeepSpeed config file: ", + str, + default="none", + ) + else: + deepspeed_config["zero_stage"] = _ask_options( + "What should be your DeepSpeed's ZeRO optimization stage?", + [0, 1, 2, 3], + int, + default=2, + ) + + deepspeed_devices = ["none", "cpu", "nvme"] + if deepspeed_config["zero_stage"] >= 2: + deepspeed_config["offload_optimizer_device"] = _ask_options( + "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] + ) + deepspeed_config["offload_param_device"] = _ask_options( + "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] + ) + if deepspeed_config["offload_param_device"] == "nvme": + deepspeed_config["offload_param_nvme_path"] = _ask_field( + "Nvme Path to offload parameters?", + str, + default="/nvme", + ) + if deepspeed_config["offload_optimizer_device"] == "nvme": + deepspeed_config["offload_optimizer_nvme_path"] = _ask_field( + "Nvme Path to offload optimizer states?", + str, + default="/nvme", + ) + deepspeed_config["gradient_accumulation_steps"] = _ask_field( + "How many gradient accumulation steps you're passing in your script? [1]: ", + int, + default=1, + ) + use_gradient_clipping = _ask_field( + "Do you want to use gradient clipping? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_gradient_clipping: + deepspeed_config["gradient_clipping"] = _ask_field( + "What is the gradient clipping value? [1.0]: ", + float, + default=1.0, + ) + if deepspeed_config["zero_stage"] == 3: + deepspeed_config["zero3_save_16bit_model"] = _ask_field( + "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + deepspeed_config["zero3_init_flag"] = _ask_field( + "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if deepspeed_config["zero3_init_flag"]: + if not is_transformers_available(): + raise Exception( + "When `zero3_init_flag` is set, it requires Transformers to be installed. " + "Please run `pip3 install transformers`." + ) + use_moe = _ask_field( + "Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_moe: + deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field( + "Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : " + " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ", + str, + ) + + if num_machines > 1: + launcher_query = "Which Type of launcher do you want to use?" + deepspeed_config["deepspeed_multinode_launcher"] = _ask_options( + launcher_query, + DEEPSPEED_MULTINODE_LAUNCHERS, + lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)], + ) + + if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + deepspeed_config["deepspeed_hostfile"] = _ask_field( + "DeepSpeed configures multi-node compute resources with hostfile. 
" + "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; " + "for more information please refer official [documentation]" + "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). " + "Please specify the location of hostfile: ", + str, + ) + + is_exclusion_filter = _ask_field( + "Do you want to specify exclusion filter string? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if is_exclusion_filter: + deepspeed_config["deepspeed_exclusion_filter"] = _ask_field( + "DeepSpeed exclusion filter string: ", + str, + ) + + is_inclusion_filter = _ask_field( + "Do you want to specify inclusion filter string? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if is_inclusion_filter: + deepspeed_config["deepspeed_inclusion_filter"] = _ask_field( + "DeepSpeed inclusion filter string: ", + str, + ) + + fsdp_config = {} + if distributed_type in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_XPU, + ]: + use_fsdp = _ask_field( + "Do you want to use FullyShardedDataParallel? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_fsdp: + distributed_type = DistributedType.FSDP + if distributed_type == DistributedType.FSDP: + sharding_strategy_query = "What should be your sharding strategy?" + fsdp_config["fsdp_sharding_strategy"] = _ask_options( + sharding_strategy_query, + FSDP_SHARDING_STRATEGY, + lambda x: FSDP_SHARDING_STRATEGY[int(x)], + ) + fsdp_config["fsdp_offload_params"] = _ask_field( + "Do you want to offload parameters and gradients to CPU? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + fsdp_wrap_query = "What should be your auto wrap policy?" + fsdp_config["fsdp_auto_wrap_policy"] = _ask_options( + fsdp_wrap_query, + FSDP_AUTO_WRAP_POLICY, + lambda x: FSDP_AUTO_WRAP_POLICY[int(x)], + ) + if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]: + use_no_split_modules = _ask_field( + "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if not use_no_split_modules: + fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field( + "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :" + "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ", + str, + ) + elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]: + fsdp_config["fsdp_min_num_params"] = _ask_field( + "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ", + int, + default=100000000, + ) + fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?" + fsdp_config["fsdp_backward_prefetch"] = _ask_options( + fsdp_backward_prefetch_query, + FSDP_BACKWARD_PREFETCH, + lambda x: FSDP_BACKWARD_PREFETCH[int(x)], + ) + fsdp_state_dict_type_query = "What should be your FSDP's state dict type?" + fsdp_config["fsdp_state_dict_type"] = _ask_options( + fsdp_state_dict_type_query, + FSDP_STATE_DICT_TYPE, + lambda x: FSDP_STATE_DICT_TYPE[int(x)], + default=2, + ) + fsdp_config["fsdp_forward_prefetch"] = _ask_field( + "Do you want to enable FSDP's forward prefetch policy? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + fsdp_config["fsdp_use_orig_params"] = _ask_field( + "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field( + "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + if fsdp_config["fsdp_cpu_ram_efficient_loading"]: + fsdp_config["fsdp_sync_module_states"] = True + else: + fsdp_config["fsdp_sync_module_states"] = _ask_field( + "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config = {} + if distributed_type in [DistributedType.MULTI_GPU]: + use_megatron_lm = _ask_field( + "Do you want to use Megatron-LM ? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_megatron_lm: + distributed_type = DistributedType.MEGATRON_LM + if distributed_type == DistributedType.MEGATRON_LM: + prefix = "megatron_lm_" + megatron_lm_config[prefix + "tp_degree"] = _ask_field( + "What is the Tensor Parallelism degree/size? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + if megatron_lm_config[prefix + "tp_degree"] > 1: + megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field( + "Do you want to enable Sequence Parallelism? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "pp_degree"] = _ask_field( + "What is the Pipeline Parallelism degree/size? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + if megatron_lm_config[prefix + "pp_degree"] > 1: + megatron_lm_config[prefix + "num_micro_batches"] = _ask_field( + "What is the number of micro-batches? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + + megatron_lm_config[prefix + "recompute_activations"] = _ask_field( + "Do you want to enable selective activation recomputation? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field( + "Do you want to use distributed optimizer " + "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "gradient_clipping"] = _ask_field( + "What is the gradient clipping value based on global L2 Norm (0 to disable)? 
[1.0]: ", + float, + default=1.0, + ) + # TPU specific defaults + tpu_commands = None + tpu_command_file = None + tpu_downcast_bf16 = "no" + tpu_env = [] + tpu_name = None + tpu_vm = None + tpu_zone = None + tpu_use_sudo = False + tpu_use_cluster = False + + if distributed_type in [ + DistributedType.MULTI_CPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.XLA, + ]: + machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "") + if machine_type == "TPU": + machine_type += " cores" + elif machine_type == "CPU": + machine_type = "processes" + else: + machine_type += "(s)" + num_processes = _ask_field( + f"How many {machine_type} should be used for distributed training? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + num_processes = _ask_field( + "How many GPU(s) should be used for distributed training? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + else: + num_processes = 1 + + if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1): + raise ValueError( + f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using." + ) + + if ( + distributed_type + in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.NO, + ] + and not use_cpu + and not use_mps + ): + if is_npu_available(): + machine_type = "NPU(s)" + elif is_mlu_available(): + machine_type = "MLU(s)" + else: + machine_type = "GPU(s)" + gpu_ids = _ask_field( + f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:", + default="all", + ) + + # CPU affinity is only supported on NVIDIA hardware for now + enable_cpu_affinity = False + if distributed_type == (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps: + enable_cpu_affinity = _ask_field( + "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if distributed_type == DistributedType.XLA: + mixed_precision = "no" + main_training_function = _ask_field( + "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ", + default="main", + ) + tpu_use_cluster = _ask_field( + "Are you using a TPU cluster? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if tpu_use_cluster: + tpu_name = _ask_field( + "What is the name of your TPU cluster? ", + default=None, + error_message="Please enter the name of your TPU cluster.", + ) + tpu_zone = _ask_field( + "What is the zone of your TPU cluster? ", + default=None, + error_message="Please enter the zone of your TPU cluster.", + ) + tpu_use_sudo = _ask_field( + "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ", + default=False, + error_message="Please enter yes or no.", + ) + run_commands = _ask_field( + "Do you have code you wish to run on startup in each pod? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if run_commands: + use_command_file = _ask_field( + "Is this code located in a bash script? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_command_file: + tpu_command_file = _ask_field( + "What is the path to your bash script? ", + default=None, + error_message="Please enter the path to your bash script.", + ) + tpu_command_file = os.path.abspath(tpu_command_file) + else: + print("Please enter each command seperately you wish to run on startup in each pod.") + tpu_commands = [] + another_command = True + while another_command: + tpu_commands.append( + _ask_field( + "Please enter a single command to be ran ", + default=None, + error_message="Please enter the commands you wish to run on startup in each pod as a single string.", + ) + ) + another_command = _ask_field( + "Do you wish to add another command? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + tpu_vm = _ask_field( + "If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ", + default="", + ).split(",") + tpu_env = _ask_field( + "What environment variables do you wish to set in each pod, seperated by a comma: ", + default="", + ).split(",") + + else: + main_training_function = "main" + if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config: + mixed_precision = None + else: + mixed_precision = _ask_options( + "Do you wish to use FP16 or BF16 (mixed precision)?", + ["no", "fp16", "bf16", "fp8"], + _convert_mixed_precision, + ) + + if use_dynamo and mixed_precision == "no" and not use_cpu: + print( + "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." 
+ ) + + if distributed_type == DistributedType.XLA and mixed_precision == "bf16": + tpu_downcast_bf16 = _ask_field( + "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no" + ) + + return ClusterConfig( + compute_environment=ComputeEnvironment.LOCAL_MACHINE, + distributed_type=distributed_type, + num_processes=num_processes, + gpu_ids=gpu_ids, + mixed_precision=mixed_precision, + downcast_bf16=tpu_downcast_bf16, + machine_rank=machine_rank, + num_machines=num_machines, + main_process_ip=main_process_ip, + main_process_port=main_process_port, + main_training_function=main_training_function, + deepspeed_config=deepspeed_config, + fsdp_config=fsdp_config, + megatron_lm_config=megatron_lm_config, + ipex_config=ipex_config, + mpirun_config=mpirun_config, + use_cpu=use_cpu, + rdzv_backend=rdzv_backend, + same_network=same_network, + commands=tpu_commands, + command_file=tpu_command_file, + tpu_env=tpu_env, + tpu_name=tpu_name, + tpu_vm=tpu_vm, + tpu_zone=tpu_zone, + tpu_use_sudo=tpu_use_sudo, + tpu_use_cluster=tpu_use_cluster, + dynamo_config=dynamo_config, + debug=debug, + enable_cpu_affinity=enable_cpu_affinity, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..72414f2abe62d76bd5133f4b0ed99bf34133f6f6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os + +from accelerate.utils import ComputeEnvironment + +from .cluster import get_cluster_input +from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 +from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 +from .sagemaker import get_sagemaker_input + + +description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" + + +def get_user_input(): + compute_environment = _ask_options( + "In which compute environment are you running?", + ["This machine", "AWS (Amazon SageMaker)"], + _convert_compute_environment, + ) + if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + config = get_sagemaker_input() + else: + config = get_cluster_input() + return config + + +def config_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("config", description=description) + else: + parser = argparse.ArgumentParser("Accelerate config command", description=description) + + parser.add_argument( + "--config_file", + default=None, + help=( + "The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + ) + + if subparsers is not None: + parser.set_defaults(func=config_command) + return parser + + +def config_command(args): + config = get_user_input() + if args.config_file is not None: + config_file = args.config_file + else: + if not os.path.isdir(cache_dir): + os.makedirs(cache_dir) + config_file = default_yaml_config_file + + if config_file.endswith(".json"): + config.to_json_file(config_file) + else: + config.to_yaml_file(config_file) + print(f"accelerate configuration saved at {config_file}") + + +def main(): + parser = config_command_parser() + args = parser.parse_args() + config_command(args) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_args.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_args.py new file mode 100644 index 0000000000000000000000000000000000000000..c50f1c34a42d354903a80b506290958807a7b7c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_args.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from dataclasses import dataclass +from enum import Enum +from typing import List, Optional, Union + +import yaml + +from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType +from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION + + +hf_cache_home = os.path.expanduser( + os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface")) +) +cache_dir = os.path.join(hf_cache_home, "accelerate") +default_json_config_file = os.path.join(cache_dir, "default_config.yaml") +default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml") + +# For backward compatibility: the default config is the json one if it's the only existing file. +if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file): + default_config_file = default_yaml_config_file +else: + default_config_file = default_json_config_file + + +def load_config_from_file(config_file): + if config_file is not None: + if not os.path.isfile(config_file): + raise FileNotFoundError( + f"The passed configuration file `{config_file}` does not exist. " + "Please pass an existing file to `accelerate launch`, or use the default one " + "created through `accelerate config` and run `accelerate launch` " + "without the `--config_file` argument." 
+ ) + else: + config_file = default_config_file + with open(config_file, encoding="utf-8") as f: + if config_file.endswith(".json"): + if ( + json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE) + == ComputeEnvironment.LOCAL_MACHINE + ): + config_class = ClusterConfig + else: + config_class = SageMakerConfig + return config_class.from_json_file(json_file=config_file) + else: + if ( + yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE) + == ComputeEnvironment.LOCAL_MACHINE + ): + config_class = ClusterConfig + else: + config_class = SageMakerConfig + return config_class.from_yaml_file(yaml_file=config_file) + + +@dataclass +class BaseConfig: + compute_environment: ComputeEnvironment + distributed_type: Union[DistributedType, SageMakerDistributedType] + mixed_precision: str + use_cpu: bool + debug: bool + + def to_dict(self): + result = self.__dict__ + # For serialization, it's best to convert Enums to strings (or their underlying value type). + for key, value in result.items(): + if isinstance(value, Enum): + result[key] = value.value + if isinstance(value, dict) and not bool(value): + result[key] = None + result = {k: v for k, v in result.items() if v is not None} + return result + + @classmethod + def from_json_file(cls, json_file=None): + json_file = default_json_config_file if json_file is None else json_file + with open(json_file, encoding="utf-8") as f: + config_dict = json.load(f) + if "compute_environment" not in config_dict: + config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE + if "mixed_precision" not in config_dict: + config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None + if "fp16" in config_dict: # Convert the config to the new format. + del config_dict["fp16"] + if "dynamo_backend" in config_dict: # Convert the config to the new format. + dynamo_backend = config_dict.pop("dynamo_backend") + config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend} + if "use_cpu" not in config_dict: + config_dict["use_cpu"] = False + if "debug" not in config_dict: + config_dict["debug"] = False + if "enable_cpu_affinity" not in config_dict: + config_dict["enable_cpu_affinity"] = False + extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys())) + if len(extra_keys) > 0: + raise ValueError( + f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`" + " version or fix (and potentially remove) these keys from your config file." + ) + + return cls(**config_dict) + + def to_json_file(self, json_file): + with open(json_file, "w", encoding="utf-8") as f: + content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + f.write(content) + + @classmethod + def from_yaml_file(cls, yaml_file=None): + yaml_file = default_yaml_config_file if yaml_file is None else yaml_file + with open(yaml_file, encoding="utf-8") as f: + config_dict = yaml.safe_load(f) + if "compute_environment" not in config_dict: + config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE + if "mixed_precision" not in config_dict: + config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None + if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]: + config_dict["mixed_precision"] = "no" + if "fp16" in config_dict: # Convert the config to the new format. 
+ del config_dict["fp16"] + if "dynamo_backend" in config_dict: # Convert the config to the new format. + dynamo_backend = config_dict.pop("dynamo_backend") + config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend} + if "use_cpu" not in config_dict: + config_dict["use_cpu"] = False + if "debug" not in config_dict: + config_dict["debug"] = False + if "enable_cpu_affinity" not in config_dict: + config_dict["enable_cpu_affinity"] = False + extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys())) + if len(extra_keys) > 0: + raise ValueError( + f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`" + " version or fix (and potentially remove) these keys from your config file." + ) + return cls(**config_dict) + + def to_yaml_file(self, yaml_file): + with open(yaml_file, "w", encoding="utf-8") as f: + yaml.safe_dump(self.to_dict(), f) + + def __post_init__(self): + if isinstance(self.compute_environment, str): + self.compute_environment = ComputeEnvironment(self.compute_environment) + if isinstance(self.distributed_type, str): + if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + self.distributed_type = SageMakerDistributedType(self.distributed_type) + else: + self.distributed_type = DistributedType(self.distributed_type) + if getattr(self, "dynamo_config", None) is None: + self.dynamo_config = {} + + +@dataclass +class ClusterConfig(BaseConfig): + num_processes: int + machine_rank: int = 0 + num_machines: int = 1 + gpu_ids: Optional[str] = None + main_process_ip: Optional[str] = None + main_process_port: Optional[int] = None + rdzv_backend: Optional[str] = "static" + same_network: Optional[bool] = False + main_training_function: str = "main" + enable_cpu_affinity: bool = False + + # args for deepspeed_plugin + deepspeed_config: dict = None + # args for fsdp + fsdp_config: dict = None + # args for megatron_lm + megatron_lm_config: dict = None + # args for ipex + ipex_config: dict = None + # args for mpirun + mpirun_config: dict = None + # args for TPU + downcast_bf16: bool = False + + # args for TPU pods + tpu_name: str = None + tpu_zone: str = None + tpu_use_cluster: bool = False + tpu_use_sudo: bool = False + command_file: str = None + commands: List[str] = None + tpu_vm: List[str] = None + tpu_env: List[str] = None + + # args for dynamo + dynamo_config: dict = None + + def __post_init__(self): + if self.deepspeed_config is None: + self.deepspeed_config = {} + if self.fsdp_config is None: + self.fsdp_config = {} + if self.megatron_lm_config is None: + self.megatron_lm_config = {} + if self.ipex_config is None: + self.ipex_config = {} + if self.mpirun_config is None: + self.mpirun_config = {} + return super().__post_init__() + + +@dataclass +class SageMakerConfig(BaseConfig): + ec2_instance_type: str + iam_role_name: str + image_uri: Optional[str] = None + profile: Optional[str] = None + region: str = "us-east-1" + num_machines: int = 1 + gpu_ids: str = "all" + base_job_name: str = f"accelerate-sagemaker-{num_machines}" + pytorch_version: str = SAGEMAKER_PYTORCH_VERSION + transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION + py_version: str = SAGEMAKER_PYTHON_VERSION + sagemaker_inputs_file: str = None + sagemaker_metrics_file: str = None + additional_args: dict = None + dynamo_config: dict = None diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py 
b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..091da5b03c771bccefc2cd21b047536fbc07bcbf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from ...utils.dataclasses import ( + ComputeEnvironment, + DistributedType, + DynamoBackend, + PrecisionType, + SageMakerDistributedType, +) +from ..menu import BulletMenu + + +DYNAMO_BACKENDS = [ + "EAGER", + "AOT_EAGER", + "INDUCTOR", + "AOT_TS_NVFUSER", + "NVPRIMS_NVFUSER", + "CUDAGRAPHS", + "OFI", + "FX2TRT", + "ONNXRT", + "TENSORRT", + "IPEX", + "TVM", +] + + +def _ask_field(input_text, convert_value=None, default=None, error_message=None): + ask_again = True + while ask_again: + result = input(input_text) + try: + if default is not None and len(result) == 0: + return default + return convert_value(result) if convert_value is not None else result + except Exception: + if error_message is not None: + print(error_message) + + +def _ask_options(input_text, options=[], convert_value=None, default=0): + menu = BulletMenu(input_text, options) + result = menu.run(default_choice=default) + return convert_value(result) if convert_value is not None else result + + +def _convert_compute_environment(value): + value = int(value) + return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value]) + + +def _convert_distributed_mode(value): + value = int(value) + return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "XLA"][value]) + + +def _convert_dynamo_backend(value): + value = int(value) + return DynamoBackend(DYNAMO_BACKENDS[value]).value + + +def _convert_mixed_precision(value): + value = int(value) + return PrecisionType(["no", "fp16", "bf16", "fp8"][value]) + + +def _convert_sagemaker_distributed_mode(value): + value = int(value) + return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value]) + + +def _convert_yes_no_to_bool(value): + return {"yes": True, "no": False}[value.lower()] + + +class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter): + """ + A custom formatter that will remove the usage line from the help message for subcommands. 
+ """ + + def _format_usage(self, usage, actions, groups, prefix): + usage = super()._format_usage(usage, actions, groups, prefix) + usage = usage.replace(" [] ", "") + return usage diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/default.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/default.py new file mode 100644 index 0000000000000000000000000000000000000000..e33331b98e6c8eacbaf8e9710b40e2ca6fc88b3d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/default.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pathlib import Path + +import torch + +from ...utils import is_mlu_available, is_npu_available, is_xpu_available +from .config_args import ClusterConfig, default_json_config_file +from .config_utils import SubcommandHelpFormatter + + +description = "Create a default config file for Accelerate with only a few flags set." + + +def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False): + """ + Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also + set CPU if it is a CPU-only machine. + + Args: + mixed_precision (`str`, *optional*, defaults to "no"): + Mixed Precision to use. Should be one of "no", "fp16", or "bf16" + save_location (`str`, *optional*, defaults to `default_json_config_file`): + Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default + location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting + the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`. + use_xpu (`bool`, *optional*, defaults to `False`): + Whether to use XPU if available. + """ + path = Path(save_location) + path.parent.mkdir(parents=True, exist_ok=True) + if path.exists(): + print( + f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." + ) + return False + mixed_precision = mixed_precision.lower() + if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: + raise ValueError( + f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. 
Received {mixed_precision}" + ) + config = { + "compute_environment": "LOCAL_MACHINE", + "mixed_precision": mixed_precision, + } + if is_mlu_available(): + num_mlus = torch.mlu.device_count() + config["num_processes"] = num_mlus + config["use_cpu"] = False + if num_mlus > 1: + config["distributed_type"] = "MULTI_MLU" + else: + config["distributed_type"] = "NO" + elif torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + config["num_processes"] = num_gpus + config["use_cpu"] = False + if num_gpus > 1: + config["distributed_type"] = "MULTI_GPU" + else: + config["distributed_type"] = "NO" + elif is_xpu_available() and use_xpu: + num_xpus = torch.xpu.device_count() + config["num_processes"] = num_xpus + config["use_cpu"] = False + if num_xpus > 1: + config["distributed_type"] = "MULTI_XPU" + else: + config["distributed_type"] = "NO" + elif is_npu_available(): + num_npus = torch.npu.device_count() + config["num_processes"] = num_npus + config["use_cpu"] = False + if num_npus > 1: + config["distributed_type"] = "MULTI_NPU" + else: + config["distributed_type"] = "NO" + else: + num_xpus = 0 + config["use_cpu"] = True + config["num_processes"] = 1 + config["distributed_type"] = "NO" + config["debug"] = False + config = ClusterConfig(**config) + config.to_json_file(path) + return path + + +def default_command_parser(parser, parents): + parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter) + parser.add_argument( + "--config_file", + default=default_json_config_file, + help=( + "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + dest="save_location", + ) + + parser.add_argument( + "--mixed_precision", + choices=["no", "fp16", "bf16"], + type=str, + help="Whether or not to use mixed precision training. " + "Choose between FP16 and BF16 (bfloat16) training. " + "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", + default="no", + ) + parser.set_defaults(func=default_config_command) + return parser + + +def default_config_command(args): + config_file = write_basic_config(args.mixed_precision, args.save_location) + if config_file: + print(f"accelerate configuration saved at {config_file}") diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/update.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/update.py new file mode 100644 index 0000000000000000000000000000000000000000..5f025594b04ada3e3a78687befc5c1bc1d236adf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/update.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from pathlib import Path + +from .config_args import default_config_file, load_config_from_file +from .config_utils import SubcommandHelpFormatter + + +description = "Update an existing config file with the latest defaults while maintaining the old configuration." + + +def update_config(args): + """ + Update an existing config file with the latest defaults while maintaining the old configuration. + """ + config_file = args.config_file + if config_file is None and Path(default_config_file).exists(): + config_file = default_config_file + elif not Path(config_file).exists(): + raise ValueError(f"The passed config file located at {config_file} doesn't exist.") + config = load_config_from_file(config_file) + + if config_file.endswith(".json"): + config.to_json_file(config_file) + else: + config.to_yaml_file(config_file) + return config_file + + +def update_command_parser(parser, parents): + parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter) + parser.add_argument( + "--config_file", + default=None, + help=( + "The path to the config file to update. Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + ) + + parser.set_defaults(func=update_config_command) + return parser + + +def update_config_command(args): + config_file = update_config(args) + print(f"Sucessfully updated the configuration file at {config_file}.") diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9f058b3794209181e74862c5403d4e7c9db5be --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__init__.py @@ -0,0 +1,51 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from .testing import ( + DEFAULT_LAUNCH_COMMAND, + are_the_same_tensors, + assert_exception, + device_count, + execute_subprocess_async, + get_launch_command, + memory_allocated_func, + path_in_accelerate_package, + require_bnb, + require_cpu, + require_cuda, + require_huggingface_suite, + require_mlu, + require_mps, + require_multi_device, + require_multi_gpu, + require_multi_xpu, + require_non_cpu, + require_non_torch_xla, + require_non_xpu, + require_npu, + require_pippy, + require_single_device, + require_single_gpu, + require_single_xpu, + require_torch_min_version, + require_torchvision, + require_tpu, + require_xpu, + skip, + slow, + torch_device, +) +from .training import RegressionDataset, RegressionModel, RegressionModel4XPU + + +from .scripts import test_script, test_sync, test_ops # isort: skip diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd402508a2a0d5efb8d478bb1d4f8e8a92dc2afc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d817f3ff17900806ce5defe9c26c202f6e8facb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e6a1fc8fe332e78a4c3402aaeb6c6a541b08545 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7dd06668ed2af6a7e14bb0e619b986589ef4f4a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/examples.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/examples.py new file mode 100644 index 0000000000000000000000000000000000000000..ed41d38c9092385ba9730472aa10b5208f48c67b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/examples.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each +`examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the +others are used to either get the code that matters, or to preprocess them (such as stripping comments) +""" + +import os +from typing import List + + +def get_function_contents_by_name(lines: List[str], name: str): + """ + Extracts a function from `lines` of segmented source code with the name `name`. + + Args: + lines (`List[str]`): + Source code of a script separated by line. + name (`str`): + The name of the function to extract. Should be either `training_function` or `main` + """ + if name != "training_function" and name != "main": + raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'") + good_lines, found_start = [], False + for line in lines: + if not found_start and f"def {name}" in line: + found_start = True + good_lines.append(line) + continue + if found_start: + if name == "training_function" and "def main" in line: + return good_lines + if name == "main" and "if __name__" in line: + return good_lines + good_lines.append(line) + + +def clean_lines(lines: List[str]): + """ + Filters `lines` and removes any entries that start with a comment ('#') or are just a newline ('\n') + + Args: + lines (`List[str]`): + Source code of a script separated by line. + """ + return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"] + + +def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None): + """ + Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be + used when testing to see if `complete_*_.py` examples have all of the implementations from each of the + `examples/by_feature/*` scripts. + + It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code + is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the + `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter. + + Args: + base_filename (`str` or `os.PathLike`): + The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py` + feature_filename (`str` or `os.PathLike`): + The filepath of a single feature example script. The contents of this script are checked to see if they + exist in `base_filename` + parser_only (`bool`): + Whether to compare only the `main()` sections in both files, or to compare the contents of + `training_function()` + secondary_filename (`str`, *optional*): + A potential secondary filepath that should be included in the check. This function extracts the base + functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than + `complete_nlp_example.py`, the template script should be included here.
Such as `examples/cv_example.py` + """ + with open(base_filename) as f: + base_file_contents = f.readlines() + with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f: + full_file_contents = f.readlines() + with open(feature_filename) as f: + feature_file_contents = f.readlines() + if secondary_filename is not None: + with open(secondary_filename) as f: + secondary_file_contents = f.readlines() + + # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content + if parser_only: + base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main")) + if secondary_filename is not None: + secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main")) + else: + base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function")) + if secondary_filename is not None: + secondary_file_func = clean_lines( + get_function_contents_by_name(secondary_file_contents, "training_function") + ) + + _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n" + + # Specific code in our script that differs from the full version, aka what is new + new_feature_code = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + it = iter(feature_file_func) + for i in range(len(feature_file_func) - 1): + if i not in passed_idxs: + line = next(it) + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + new_feature_code.append(line) + passed_idxs.append(i) + else: + # Skip over the `config['num_epochs'] = 2` statement + _ = next(it) + + # Extract out just the new parts from the full_file_training_func + new_full_example_parts = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + for i, line in enumerate(base_file_func): + if i not in passed_idxs: + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + new_full_example_parts.append(line) + passed_idxs.append(i) + + # Finally, get the overall diff + diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts] + if secondary_filename is not None: + diff_from_two = [line for line in full_file_contents if line not in secondary_file_func] + diff_from_example = [line for line in diff_from_example if line not in diff_from_two] + + return diff_from_example diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
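# A minimal sketch of how compare_against_test() above might be invoked; the example paths are
# illustrative, and the call assumes the working directory is the root of the accelerate repository
# (the helper resolves "examples/nlp_example.py" relative to it).
from accelerate.test_utils.examples import compare_against_test

diff = compare_against_test(
    base_filename="examples/complete_nlp_example.py",
    feature_filename="examples/by_feature/checkpointing.py",
    parser_only=False,  # False compares the training_function bodies instead of the argument parsers
)
assert len(diff) == 0, "The complete example is missing lines introduced by the feature script:\n" + "".join(diff)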
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f459c82cc21fe336952bb838219f0270b07d058b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..098c18d8b3d69f78ad3f13b9a602a62cab69b9b1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..542cc1f031c11e59fadba4cabeae89b1d6d581bd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ca384b9f0a3da776c282f448fe9e74eb19b0cd7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50ae3f0f20393e982ec2ea3e1cd6d84ff2388d83 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a30c93c39aae535975e4df948b0a60f430e34f9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70f2cf483a9e3a4da20f6f87539fd438153410db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40585b3ac4046b71106d4d9db199dfe58bdd9648 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a929020dbc699f236b72f8c7e8c5259904eaf6a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36bcde7a9bde16a643b8c305832820c14fea603b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c81bd5b6173d47b2afedf36df248dec468981a3 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faec47e70bfc3290e739f1309a83de384ddad7e2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ba3029a7c73bfe0be4f3138e1a812e0c2b56cae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28ff9a20f051f810b72fbba9059d638ac0625f57 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..41c77c7ec5e6e2475a795efdb54702600eac0282 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py @@ -0,0 +1,268 @@ +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import os + +import evaluate +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. 
+ + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def evaluation_loop(accelerator, model, eval_dataloader, metric): + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. 
+ batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + return eval_metric["accuracy"] + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + metric = evaluate.load("glue", "mrpc") + ending_epoch = num_epochs + + if args.partial_train_epoch is not None: + ending_epoch = args.partial_train_epoch + + if args.resume_from_checkpoint: + accelerator.load_state(args.resume_from_checkpoint) + epoch_string = args.resume_from_checkpoint.split("epoch_")[1] + state_epoch_num = "" + for char in epoch_string: + if char.isdigit(): + state_epoch_num += char + else: + break + starting_epoch = int(state_epoch_num) + 1 + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + accelerator.print("resumed checkpoint performance:", accuracy) + accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0]) + accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"]) + with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f: + resumed_state = json.load(f) + assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" + assert ( + resumed_state["lr"] == lr_scheduler.get_lr()[0] + ), "Scheduler learning rate mismatch, loading from checkpoint failed" + assert ( + resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] + ), "Optimizer learning rate mismatch, loading from checkpoint failed" + assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" + return + + # Now we train the model + state = {} + for epoch in range(starting_epoch, ending_epoch): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + output_dir = f"epoch_{epoch}" + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + state["accuracy"] = accuracy + state["lr"] = lr_scheduler.get_lr()[0] + state["optimizer_lr"] = optimizer.param_groups[0]["lr"] + state["epoch"] = epoch + state["step"] = overall_step + accelerator.print(f"epoch {epoch}:", state) + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f: + json.dump(state, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--partial_train_epoch", + type=int, + default=None, + help="If passed, the training will stop after this number of epochs.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=2, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9ac13aba6266f31f1e0f9eb41b961fc2933d00ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py @@ -0,0 +1,306 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import math +import os +from copy import deepcopy + +import datasets +import evaluate +import torch +import transformers +from datasets import load_dataset +from torch.utils.data import DataLoader, IterableDataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer + +from accelerate import Accelerator, DataLoaderConfiguration, DistributedType +from accelerate.data_loader import DataLoaderDispatcher +from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device +from accelerate.utils import is_torch_xla_available, set_seed + + +os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true" + + +class ListHandler(logging.Handler): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.logs = [] + + def emit(self, record): + self.logs.append(record) + + +def get_basic_setup(accelerator, num_samples=82, batch_size=16): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=num_samples) + dataloader = DataLoader(dset, batch_size=batch_size) + model.to(accelerator.device) + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + return model, ddp_model, dataloader + + +def get_dataloader(accelerator: Accelerator, use_longest=False): + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased") + dataset = load_dataset("glue", "mrpc", split="validation") + + def tokenize_function(examples): + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + with accelerator.main_process_first(): + tokenized_datasets = dataset.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", 
"sentence2"], + ) + + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + if use_longest: + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + + return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16) + + +def get_mrpc_setup(dispatch_batches, split_batches): + dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, split_batches=split_batches) + accelerator = Accelerator(dataloader_config=dataloader_config) + dataloader = get_dataloader(accelerator, not dispatch_batches) + model = AutoModelForSequenceClassification.from_pretrained( + "hf-internal-testing/mrpc-bert-base-cased", return_dict=True + ) + ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader) + return { + "ddp": [ddp_model, ddp_dataloader, torch_device], + "no": [model, dataloader, accelerator.device], + }, accelerator + + +def generate_predictions(model, dataloader, accelerator): + logits_and_targets = [] + for batch in dataloader: + input, target = batch.values() + with torch.no_grad(): + logit = model(input) + logit, target = accelerator.gather_for_metrics((logit, target)) + logits_and_targets.append((logit, target)) + logits, targs = [], [] + for logit, targ in logits_and_targets: + logits.append(logit) + targs.append(targ) + logits, targs = torch.cat(logits), torch.cat(targs) + return logits, targs + + +def test_torch_metrics( + accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16 +): + _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size) + logits, _ = generate_predictions(ddp_model, dataloader, accelerator) + assert ( + len(logits) == num_samples + ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}" + + +def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False): + metric = evaluate.load("glue", "mrpc") + setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches) + # First do baseline + model, dataloader, device = setup["no"] + model.to(device) + model.eval() + for batch in dataloader: + batch.to(device) + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + metric.add_batch(predictions=preds, references=batch["labels"]) + baseline = metric.compute() + + # Then do distributed + model, dataloader, device = setup["ddp"] + model.eval() + for batch in dataloader: + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + references = batch["labels"] + preds, references = accelerator.gather_for_metrics((preds, references)) + metric.add_batch(predictions=preds, references=references) + distributed = metric.compute() + + for key in "accuracy f1".split(): + assert math.isclose( + baseline[key], distributed[key] + ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" + + +def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset(): + class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __iter__(self): + yield from self.data + + iterable_dataset = DummyIterableDataset([n for n in range(30)]) + dataloader = DataLoader(iterable_dataset, batch_size=4) + accelerator = Accelerator() + prepared_dataloader = 
accelerator.prepare(dataloader) + + if accelerator.is_main_process: + logger = logging.root.manager.loggerDict["accelerate.accelerator"] + list_handler = ListHandler() + logger.addHandler(list_handler) + + batches_for_metrics = [] + for batch in prepared_dataloader: + batches_for_metrics.append(accelerator.gather_for_metrics(batch)) + + assert torch.cat(batches_for_metrics).size(0) == 30 + + if accelerator.is_main_process: + assert len(list_handler.logs) == 0 + logger.removeHandler(list_handler) + + +def test_gather_for_metrics_with_iterable_dataset(): + class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __iter__(self): + yield from self.data + + iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30))) + dataloader = DataLoader(iterable_dataset, batch_size=4) + + accelerator = Accelerator() + prepared_dataloader = accelerator.prepare(dataloader) + + assert isinstance(prepared_dataloader, DataLoaderDispatcher) + + if accelerator.is_main_process: + logger = logging.root.manager.loggerDict["accelerate.accelerator"] + list_handler = ListHandler() + logger.addHandler(list_handler) + + batches_for_metrics = [] + for batch in prepared_dataloader: + batches_for_metrics.append(accelerator.gather_for_metrics(batch)) + + assert torch.cat(batches_for_metrics).size(0) == 30 + + if accelerator.is_main_process: + assert len(list_handler.logs) == 0 + + logger.removeHandler(list_handler) + + +def test_gather_for_metrics_drop_last(): + accelerator = Accelerator() + per_device_batch_size = 5 + num_items = (10 * accelerator.num_processes) + 1 + dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True) + dataloader = accelerator.prepare(dataloader) + + iterator = iter(dataloader) + next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0') + batch = next(iterator) + gathered_items = accelerator.gather_for_metrics(batch) + + # Should return a full set of complete batches from each GPU + num_expected_items = per_device_batch_size * accelerator.num_processes + assert gathered_items.size(0) == ( + num_expected_items + ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}" + + +def main(): + dataloader_config = DataLoaderConfiguration(split_batches=False, dispatch_batches=False) + accelerator = Accelerator(dataloader_config=dataloader_config) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + # TorchXLA does not support batch dispatching. 'put_on_device' is always False for + # TorchXLA, which can cause a value error in 'prepare_data_loader' function. + dispatch_batches_options = [False] if accelerator.state.distributed_type == DistributedType.XLA else [True, False] + + # Temporarily close this test for TorchXLA due to the 'Cannot set version_counter for + # inference tensor' error in inference mode. Reopen it after TorchXLA fixes this bug. 
+ # These are a bit slower so they should only be ran on the GPU or TPU + if accelerator.device.type != "cpu" and not is_torch_xla_available(): + if accelerator.is_local_main_process: + print("**Testing gather_for_metrics**") + for split_batches in [True, False]: + for dispatch_batches in dispatch_batches_options: + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`") + test_mrpc(dispatch_batches, split_batches) + accelerator.state._reset_state() + print("test_gather_for_metrics_with_iterable_dataset") + test_gather_for_metrics_with_iterable_dataset() + print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset") + test_gather_for_metrics_with_non_tensor_objects_iterable_dataset() + + # MpDeviceLoader in TorchXLA is an asynchronous loader that preloads several batches into cache. + # This can cause the 'end_of_dataloader' of DataLoaderStateMixin to be set earlier than intended. + # Skip this test when TorchXLA is enabled. + if accelerator.state.distributed_type != DistributedType.XLA: + if accelerator.is_local_main_process: + print("**Test torch metrics**") + for split_batches in [True, False]: + for dispatch_batches in dispatch_batches_options: + dataloader_config = DataLoaderConfiguration( + split_batches=split_batches, dispatch_batches=dispatch_batches + ) + accelerator = Accelerator(dataloader_config=dataloader_config) + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99") + test_torch_metrics(accelerator, 99) + accelerator.state._reset_state() + if accelerator.is_local_main_process: + print("**Test last batch is not dropped when perfectly divisible**") + accelerator = Accelerator() + test_torch_metrics(accelerator, 512) + accelerator.state._reset_state() + if accelerator.is_local_main_process: + print("**Test that `drop_last` is taken into account**") + test_gather_for_metrics_drop_last() + accelerator.state._reset_state() + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..aeb55f6c87d7831ed5d7f370a4b9d7810777bd3e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py @@ -0,0 +1,282 @@ +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
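# A minimal sketch of the evaluation pattern the metric tests above exercise: gather_for_metrics()
# drops the samples Accelerate duplicates to fill the last uneven batch, so no manual `samples_seen`
# bookkeeping is needed. The accelerator, model, dataloader and metric are assumed to be prepared as
# in get_mrpc_setup() above.
import torch

def distributed_accuracy(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()["accuracy"]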
+import argparse +import gc +import json +import os + +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils import is_mlu_available, is_npu_available, is_xpu_available +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +# Converting Bytes to Megabytes +def b2mb(x): + return int(x / 2**20) + + +# This context manager is used to track the peak memory usage of the process +class TorchTracemalloc: + def __enter__(self): + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.cuda.memory_allocated() + elif is_mlu_available(): + torch.mlu.empty_cache() + torch.mlu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.mlu.memory_allocated() + elif is_npu_available(): + torch.npu.empty_cache() + torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.npu.memory_allocated() + elif is_xpu_available(): + torch.xpu.empty_cache() + torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.xpu.memory_allocated() + return self + + def __exit__(self, *exc): + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + self.end = torch.cuda.memory_allocated() + self.peak = torch.cuda.max_memory_allocated() + elif is_mlu_available(): + torch.mlu.empty_cache() + self.end = torch.mlu.memory_allocated() + self.peak = torch.mlu.max_memory_allocated() + elif is_npu_available(): + torch.npu.empty_cache() + self.end = torch.npu.memory_allocated() + self.peak = torch.npu.max_memory_allocated() + elif is_xpu_available(): + torch.xpu.empty_cache() + self.end = torch.xpu.memory_allocated() + self.peak = torch.xpu.max_memory_allocated() + self.used = b2mb(self.end - self.begin) + self.peaked = b2mb(self.peak - self.begin) + # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") + + +def get_dataloaders( + accelerator: Accelerator, + batch_size: int = 16, + model_name: str = "bert-base-cased", + n_train: int = 320, + n_val: int = 160, +): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + The name of the model to use. + n_train (`int`, *optional*): + The number of training examples to use. + n_val (`int`, *optional*): + The number of validation examples to use.
+ """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset( + "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} + ) + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + train_total_peak_memory = {} + for epoch in range(starting_epoch, num_epochs): + with TorchTracemalloc() as tracemalloc: + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + + # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage + accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}") + accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}") + accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") + accelerator.print( + f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" + ) + train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin) + if args.peak_memory_upper_bound is not None: + assert ( + train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound + ), "Peak memory usage exceeded the upper bound" + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f: + json.dump(train_total_peak_memory, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--peak_memory_upper_bound", + type=float, + default=None, + help="The upper bound of peak memory usage in MB. 
If set, the training will throw an error if the peak memory usage exceeds this value.", + ) + parser.add_argument( + "--n_train", + type=int, + default=320, + help="Number of training examples to use.", + ) + parser.add_argument( + "--n_val", + type=int, + default=160, + help="Number of validation examples to use.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=1, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..7051859aa74bbac5b15e4465395b8177e3dd1d27 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py @@ -0,0 +1,243 @@ +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import os + +import evaluate +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. 
+ if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + max_training_steps = len(train_dataloader) * num_epochs + + # Instantiate scheduler + linear_decay_scheduler = False + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + linear_decay_scheduler = True + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + metric = evaluate.load("glue", "mrpc") + best_performance = 0 + performance_metric = {} + expected_lr_after_first_optim_step = lr * ( + 1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps) + ) + lr_scheduler_check_completed = False + for epoch in range(starting_epoch, num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + accelerator.backward(loss) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # assert the learning rate after first optimizer step + if ( + accelerator.sync_gradients + and not lr_scheduler_check_completed + and linear_decay_scheduler + and accelerator.state.mixed_precision == "no" + ): + assert ( + lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step + ), f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got {lr_scheduler.get_last_lr()[0]}" + lr_scheduler_check_completed = True + + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. 
+ accelerator.print(f"epoch {epoch}:", eval_metric) + performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"] + + if best_performance < eval_metric["accuracy"]: + best_performance = eval_metric["accuracy"] + + # check that the LR is 0 + if linear_decay_scheduler and accelerator.state.mixed_precision == "no": + assert ( + lr_scheduler.get_last_lr()[0] == 0 + ), f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}" + + if args.performance_lower_bound is not None: + assert ( + args.performance_lower_bound <= best_performance + ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: + json.dump(performance_metric, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--performance_lower_bound", + type=float, + default=None, + help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=3, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py new file mode 100644 index 0000000000000000000000000000000000000000..f589365649d56fd690b4f4104a8838f885183527 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py @@ -0,0 +1,129 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import torch +from torchvision.models import resnet34 +from transformers import ( + BertConfig, + BertForMaskedLM, + GPT2Config, + GPT2ForSequenceClassification, + T5Config, + T5ForConditionalGeneration, +) + +from accelerate import PartialState +from accelerate.inference import prepare_pippy +from accelerate.utils import DistributedType, send_to_device, set_seed + + +model_to_config = { + "t5": (T5ForConditionalGeneration, T5Config, 1024), + "bert": (BertForMaskedLM, BertConfig, 512), + "gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024), +} + + +def get_model_and_data_for_text(model_name, device, num_processes: int = 2): + initializer, config, seq_len = model_to_config[model_name] + config_args = {} + # Eventually needed for batch inference tests on gpt-2 when bs != 1 + # if model_name == "gpt2": + # config_args["pad_token_id"] = 0 + model_config = config(**config_args) + model = initializer(model_config) + return model, torch.randint( + low=0, + high=model_config.vocab_size, + size=(num_processes, seq_len), + device=device, + dtype=torch.int64, + requires_grad=False, + ) + + +def test_gpt2(batch_size: int = 2): + set_seed(42) + state = PartialState() + model, inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size) + model = prepare_pippy(model, example_args=(inputs,), no_split_module_classes=model._no_split_modules) + # For inference args need to be a tuple + inputs = inputs.to("cuda") + with torch.no_grad(): + output = model(inputs) + # Zach: Check that we just grab the real outputs we need at the end + if not state.is_last_process: + assert output is None, "Output was not generated on just the last process!" + else: + assert output is not None, "Output was not generated in the last process!" + + +def test_t5(batch_size: int = 2): + set_seed(42) + state = PartialState() + model, inputs = get_model_and_data_for_text("t5", "cpu", batch_size) + example_inputs = {"input_ids": inputs, "decoder_input_ids": inputs} + model = prepare_pippy( + model, + no_split_module_classes=model._no_split_modules, + example_kwargs=example_inputs, + ) + # For inference args need to be a tuple + inputs = send_to_device(example_inputs, "cuda:0") + with torch.no_grad(): + output = model(*inputs.values()) + # Zach: Check that we just grab the real outputs we need at the end + if not state.is_last_process: + assert output is None, "Output was not generated on just the last process!" + else: + assert output is not None, "Output was not generated in the last process!" + + +def test_resnet(batch_size: int = 2): + set_seed(42) + state = PartialState() + model = resnet34() + input_tensor = torch.rand(batch_size, 3, 224, 224) + model = prepare_pippy( + model, + example_args=(input_tensor,), + ) + inputs = send_to_device(input_tensor, "cuda:0") + with torch.no_grad(): + output = model(inputs) + # Zach: Check that we just grab the real outputs we need at the end + if not state.is_last_process: + assert output is None, "Output was not generated on just the last process!" + else: + assert output is not None, "Output was not generated in the last process!" 
+ + +if __name__ == "__main__": + state = PartialState() + state.print("Testing pippy integration...") + if state.distributed_type == DistributedType.MULTI_GPU: + state.print("Testing GPT2...") + test_gpt2() + # Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue + # due to references + # NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope + # test_gpt2(3) + state.print("Testing T5...") + test_t5() + test_t5(1) + test_t5(3) + state.print("Testing CV model...") + test_resnet() + test_resnet(3) + else: + print("Less than two GPUs found, not running tests!") diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py new file mode 100644 index 0000000000000000000000000000000000000000..67e78a7d37c0b82113e1cdbb3e76987b24c8494f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py @@ -0,0 +1,52 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch.distributed + +from accelerate.test_utils import require_huggingface_suite +from accelerate.utils import is_transformers_available + + +if is_transformers_available(): + from transformers import AutoModel, TrainingArguments + + +GPT2_TINY = "sshleifer/tiny-gpt2" + + +@require_huggingface_suite +def init_torch_dist_then_launch_deepspeed(): + torch.distributed.init_process_group(backend="nccl") + deepspeed_config = { + "zero_optimization": { + "stage": 3, + }, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + } + train_args = TrainingArguments( + output_dir="./", + deepspeed=deepspeed_config, + ) + model = AutoModel.from_pretrained(GPT2_TINY) + assert train_args is not None + assert model is not None + + +def main(): + init_torch_dist_then_launch_deepspeed() + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..c85828cd49624372ae1866082e5580c60f8c9293 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py @@ -0,0 +1,26 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import torch + + +def main(): + if torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + else: + num_gpus = 0 + print(f"Successfully ran on {num_gpus} GPUs") + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..0bd127e832d9cd4d004af52dfb178f97bf99a80e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import warnings +from typing import List +from unittest.mock import Mock + +import torch +from torch.utils.data import ( + BatchSampler, + DataLoader, + Dataset, + IterableDataset, + RandomSampler, + TensorDataset, + default_collate, +) + +from accelerate.accelerator import Accelerator, DataLoaderConfiguration +from accelerate.utils.dataclasses import DistributedType + + +NUM_ELEMENTS = 22 +NUM_WORKERS = 4 +BATCH_SIZE = 4 + + +class DummyDataset(Dataset): + def __len__(self): + return NUM_ELEMENTS + + def __getitem__(self, index): + squeeze = False + + if isinstance(index, int): + index = [index] + squeeze = True + elif isinstance(index, slice): + index = list(range(*index.indices(len(self)))) + else: + index = list(index) + + batch = [{"index": i, "label": i % 2, "random_augmentation": torch.rand(1).item()} for i in index] + + if squeeze: + batch = batch[0] + + return batch + + +class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __iter__(self): + yield from self.data + + +def create_accelerator(even_batches=True): + dataloader_config = DataLoaderConfiguration(even_batches=even_batches) + accelerator = Accelerator(dataloader_config=dataloader_config) + assert accelerator.num_processes == 2, "this script expects that two GPUs are available" + return accelerator + + +def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False): + """ + Create a simple DataLoader to use during the test cases + """ + if iterable: + dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size))) + else: + dataset = TensorDataset(torch.as_tensor(range(dataset_size))) + + dl = DataLoader(dataset, batch_size=batch_size) + dl = accelerator.prepare(dl) + + return dl + + +def verify_dataloader_batch_sizes( + accelerator: Accelerator, + dataset_size: int, + batch_size: int, + process_0_expected_batch_sizes: List[int], + process_1_expected_batch_sizes: List[int], +): + """ + A helper function for verifying the batch sizes coming from a prepared dataloader in each process + """ + dl =
create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size) + + batch_sizes = [len(batch[0]) for batch in dl] + + if accelerator.process_index == 0: + assert batch_sizes == process_0_expected_batch_sizes + elif accelerator.process_index == 1: + assert batch_sizes == process_1_expected_batch_sizes + + +def test_default_ensures_even_batch_sizes(): + accelerator = create_accelerator() + + # without padding, we would expect a different number of batches + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1, 1], + ) + + # without padding, we would expect the same number of batches, but different sizes + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 2], + ) + + +def test_can_disable_even_batches(): + accelerator = create_accelerator(even_batches=False) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1], + ) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 1], + ) + + +def test_can_join_uneven_inputs(): + accelerator = create_accelerator(even_batches=False) + + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + + dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + batch_idxs = [] + with accelerator.join_uneven_inputs([ddp_model]): + for batch_idx, batch in enumerate(dl): + output = ddp_model(batch[0].float()) + loss = output.sum() + loss.backward() + batch_idxs.append(batch_idx) + + accelerator.wait_for_everyone() + + if accelerator.process_index == 0: + assert batch_idxs == [0, 1] + elif accelerator.process_index == 1: + assert batch_idxs == [0] + + +def test_join_raises_warning_for_non_ddp_distributed(accelerator): + with warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([Mock()]): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for multi-GPU" in str(w[-1].message) + + +def test_join_can_override_even_batches(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + train_dl_overridden_value = train_dl.batch_sampler.even_batches + valid_dl_overridden_value = valid_dl.batch_sampler.even_batches + + assert train_dl_overridden_value == overridden_even_batches + assert valid_dl_overridden_value == overridden_even_batches + assert train_dl.batch_sampler.even_batches == default_even_batches + assert valid_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_can_override_for_mixed_type_dataloaders(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + batch_dl = create_dataloader(accelerator, 
dataset_size=3, batch_size=1) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + batch_dl_overridden_value = batch_dl.batch_sampler.even_batches + except AttributeError: + # ensure attribute error is not raised when processing iterable dl + raise AssertionError + + assert batch_dl_overridden_value == overridden_even_batches + assert batch_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_raises_warning_for_iterable_when_overriding_even_batches(): + accelerator = create_accelerator() + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + + with warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([ddp_model], even_batches=False): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for map-style datasets" in str(w[-1].message) + + +def test_data_loader(data_loader, accelerator): + # Prepare the DataLoader + data_loader = accelerator.prepare(data_loader) + + all_examples = [] + for i, batch in enumerate(data_loader): + index, _ = accelerator.gather_for_metrics((batch["index"], batch["label"])) + all_examples.extend(index.detach().cpu().numpy().tolist()) + + # Sort the examples + sorted_all_examples = sorted(all_examples) + + # Check if all elements are present in the sorted list of iterated samples + assert ( + len(set(sorted_all_examples)) == NUM_ELEMENTS + ), "Not all the dataset elements have been iterated in an epoch due to duplication of samples across processes." + + +def main(): + accelerator = create_accelerator() + torch.manual_seed(accelerator.process_index) + + accelerator.print("Test that even_batches variable ensures uniform batches across processes") + test_default_ensures_even_batch_sizes() + + accelerator.print("Run tests with even_batches disabled") + test_can_disable_even_batches() + + accelerator.print("Test joining uneven inputs") + test_can_join_uneven_inputs() + + accelerator.print("Test overriding even_batches when joining uneven inputs") + test_join_can_override_even_batches() + + accelerator.print("Test overriding even_batches for mixed dataloader types") + test_join_can_override_for_mixed_type_dataloaders() + + accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders") + test_join_raises_warning_for_iterable_when_overriding_even_batches() + + accelerator.print("Test join with non DDP distributed raises warning") + original_state = accelerator.state.distributed_type + accelerator.state.distributed_type = DistributedType.FSDP + test_join_raises_warning_for_non_ddp_distributed(accelerator) + accelerator.state.distributed_type = original_state + + dataset = DummyDataset() + # Conventional Dataloader with shuffle=False + loader = DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + + # Conventional Dataloader with shuffle=True + loader = DataLoader(dataset, shuffle=True, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + + # Dataloader with batch_sampler + sampler = BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) + loader = DataLoader(dataset, batch_sampler=sampler, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + + # Dataloader with sampler as an instance of `BatchSampler` + sampler = 
BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) + loader = DataLoader(dataset, sampler=sampler, batch_size=None, collate_fn=default_collate, num_workers=NUM_WORKERS) + test_data_loader(loader, accelerator) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py new file mode 100644 index 0000000000000000000000000000000000000000..f0c073ac3eae5a86c351a5f8232b84bcdfb920a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py @@ -0,0 +1,56 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Test file to ensure that, in general, certain situational setups for notebooks work. +""" + +import os + +from pytest import raises + +from accelerate import PartialState, notebook_launcher +from accelerate.test_utils import require_bnb +from accelerate.utils import is_bnb_available + + +def basic_function(): + # Just prints the PartialState + print(f"PartialState:\n{PartialState()}") + + +NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1)) + + +def test_can_initialize(): + notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES) + + +@require_bnb +def test_problematic_imports(): + with raises(RuntimeError, match="Please keep these imports"): + import bitsandbytes as bnb # noqa: F401 + + notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES) + + +def main(): + print("Test that a basic notebook function can be run") + test_can_initialize() + if is_bnb_available(): + print("Test problematic imports (bnb)") + test_problematic_imports() + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..1b18780fa70fdc2b8f579f07910c8682437459d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python + +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
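+ +""" +Checks for the distributed operations in accelerate.utils.operations: gather, gather_object, broadcast, pad_across_processes, reduce (sum and mean), the debug-mode operation checker, and copy_tensor_to_devices. +"""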
+ +import torch + +from accelerate import PartialState +from accelerate.test_utils.testing import assert_exception +from accelerate.utils.dataclasses import DistributedType +from accelerate.utils.operations import ( + DistributedOperationException, + broadcast, + copy_tensor_to_devices, + gather, + gather_object, + pad_across_processes, + reduce, +) + + +def create_tensor(state): + return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device) + + +def test_gather(state): + tensor = create_tensor(state) + gathered_tensor = gather(tensor) + assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1)) + + +def test_gather_object(state): + # Gather objects in TorchXLA is not supported. + if state.distributed_type == DistributedType.XLA: + return + obj = [state.process_index] + gathered_obj = gather_object(obj) + assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}" + assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}" + + +def test_gather_non_contigous(state): + # Skip this test because the 'is_contiguous' function of XLA tensor always returns True. + if state.distributed_type == DistributedType.XLA: + return + # Create a non-contiguous tensor + tensor = torch.arange(12).view(4, 3).t().to(state.device) + assert not tensor.is_contiguous() + # Shouldn't error out + _ = gather(tensor) + + +def test_broadcast(state): + tensor = create_tensor(state) + broadcasted_tensor = broadcast(tensor) + assert broadcasted_tensor.shape == torch.Size([state.num_processes]) + assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1)) + + +def test_pad_across_processes(state): + # We need to pad the tensor with one more element if we are the main process + # to ensure that we can pad + if state.is_main_process: + tensor = torch.arange(state.num_processes + 1).to(state.device) + else: + tensor = torch.arange(state.num_processes).to(state.device) + padded_tensor = pad_across_processes(tensor) + assert padded_tensor.shape == torch.Size([state.num_processes + 1]) + if not state.is_main_process: + assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0] + + +def test_reduce_sum(state): + # For now runs on only two processes + if state.num_processes != 2: + return + tensor = create_tensor(state) + reduced_tensor = reduce(tensor, "sum") + truth_tensor = torch.tensor([4.0, 6]).to(state.device) + assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" + + +def test_reduce_mean(state): + # For now runs on only two processes + if state.num_processes != 2: + return + tensor = create_tensor(state) + reduced_tensor = reduce(tensor, "mean") + truth_tensor = torch.tensor([2.0, 3]).to(state.device) + assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" + + +def test_op_checker(state): + # Must be in a distributed state, and gathering is currently not supported in TorchXLA. 
+ if state.distributed_type in [DistributedType.NO, DistributedType.XLA]: + return + state.debug = True + # `pad_across_processes` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4, 5]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + pad_across_processes(data, dim=0) + + # `reduce` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + reduce(data) + + # `broadcast` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + broadcast(data) + + state.debug = False + + +def test_copy_tensor_to_devices(state): + if state.distributed_type not in [DistributedType.MULTI_GPU, DistributedType.XLA]: + return + if state.is_main_process: + tensor = torch.tensor([1, 2, 3], dtype=torch.int).to(state.device) + else: + tensor = None + tensor = copy_tensor_to_devices(tensor) + assert torch.allclose(tensor, torch.tensor([1, 2, 3], dtype=torch.int, device=state.device)) + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +def main(): + state = PartialState() + state.print(f"State: {state}") + state.print("testing gather") + test_gather(state) + state.print("testing gather_object") + test_gather_object(state) + state.print("testing gather non-contigous") + test_gather_non_contigous(state) + state.print("testing broadcast") + test_broadcast(state) + state.print("testing pad_across_processes") + test_pad_across_processes(state) + state.print("testing reduce_sum") + test_reduce_sum(state) + state.print("testing reduce_mean") + test_reduce_mean(state) + state.print("testing op_checker") + test_op_checker(state) + state.print("testing sending tensors across devices") + test_copy_tensor_to_devices(state) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py new file mode 100644 index 0000000000000000000000000000000000000000..1424bef9380bb14f3e4374a7f44036dc82e12344 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py @@ -0,0 +1,804 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
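+ +""" +End-to-end checks for Accelerator and AcceleratorState: per-process execution decorators, RNG synchronization, DataLoader preparation (including split_batches and dispatch_batches), custom and seedable samplers, split_between_processes, training parity between single-process and distributed setups (including fp16 and bf16), the breakpoint trigger, and state reinstantiation. +"""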
+ +import contextlib +import io +import math +import time +from copy import deepcopy +from pathlib import Path + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset + +from accelerate import Accelerator +from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader +from accelerate.state import AcceleratorState +from accelerate.test_utils import RegressionDataset, are_the_same_tensors +from accelerate.utils import ( + DataLoaderConfiguration, + DistributedType, + gather, + is_bf16_available, + is_datasets_available, + is_ipex_available, + is_mlu_available, + is_npu_available, + is_pytest_available, + is_xpu_available, + set_seed, + synchronize_rng_states, +) + + +# TODO: remove RegressionModel4XPU once ccl support empty buffer in broadcasting. +if is_xpu_available(): + from accelerate.test_utils import RegressionModel4XPU as RegressionModel +else: + from accelerate.test_utils import RegressionModel + + +def generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler=False): + "Creates a dataloader that can also use the `SeedableRandomSampler`" + if use_seedable_sampler: + # The SeedableRandomSampler is needed during distributed setups + # for full reproducability across processes with the `DataLoader` + sampler = SeedableRandomSampler( + generator=generator, + data_source=train_set, + num_samples=len(train_set), + ) + return DataLoader(train_set, batch_size=batch_size, sampler=sampler) + else: + return DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + + +def print_main(state): + print(f"Printing from the main process {state.process_index}") + + +def print_local_main(state): + print(f"Printing from the local main process {state.local_process_index}") + + +def print_last(state): + print(f"Printing from the last process {state.process_index}") + + +def print_on(state, process_idx): + print(f"Printing from process {process_idx}: {state.process_index}") + + +def process_execution_check(): + accelerator = Accelerator() + num_processes = accelerator.num_processes + # Test main_process_first context manager + path = Path("check_main_process_first.txt") + with accelerator.main_process_first(): + if accelerator.is_main_process: + time.sleep(0.1) # ensure main process takes longest + with open(path, "a+") as f: + f.write("Currently in the main process\n") + else: + with open(path, "a+") as f: + f.write("Now on another process\n") + accelerator.wait_for_everyone() + + if accelerator.is_main_process: + with open(path) as f: + text = "".join(f.readlines()) + try: + assert text.startswith("Currently in the main process\n"), "Main process was not first" + if num_processes > 1: + assert text.endswith("Now on another process\n"), "Main process was not first" + assert ( + text.count("Now on another process\n") == accelerator.num_processes - 1 + ), f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}" + except AssertionError: + path.unlink() + raise + + if accelerator.is_main_process and path.exists(): + path.unlink() + accelerator.wait_for_everyone() + # Test the decorators + f = io.StringIO() + with contextlib.redirect_stdout(f): + accelerator.on_main_process(print_main)(accelerator.state) + result = f.getvalue().rstrip() + if accelerator.is_main_process: + assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0" + else: + assert f.getvalue().rstrip() == "", f'{result} != ""' + f.truncate(0) + f.seek(0) + + with 
contextlib.redirect_stdout(f): + accelerator.on_local_main_process(print_local_main)(accelerator.state) + if accelerator.is_local_main_process: + assert f.getvalue().rstrip() == "Printing from the local main process 0" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + with contextlib.redirect_stdout(f): + accelerator.on_last_process(print_last)(accelerator.state) + if accelerator.is_last_process: + assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + for process_idx in range(num_processes): + with contextlib.redirect_stdout(f): + accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx) + if accelerator.process_index == process_idx: + assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + +def init_state_check(): + # Test we can instantiate this twice in a row. + state = AcceleratorState() + if state.local_process_index == 0: + print("Testing, testing. 1, 2, 3.") + print(state) + + +def rng_sync_check(): + state = AcceleratorState() + synchronize_rng_states(["torch"]) + assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU." + if state.distributed_type == DistributedType.MULTI_GPU: + synchronize_rng_states(["cuda"]) + assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU." + elif state.distributed_type == DistributedType.MULTI_XPU: + synchronize_rng_states(["xpu"]) + assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU." + generator = torch.Generator() + synchronize_rng_states(["generator"], generator=generator) + assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator." + + if state.local_process_index == 0: + print("All rng are properly synched.") + + +def dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + + print(state.process_index, result, type(dl)) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + if state.process_index == 0: + print("Non-shuffled dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." 
+ + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + if state.local_process_index == 0: + print("Shuffled dataloader passing.") + + +def central_dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + if state.process_index == 0: + print("Non-shuffled central dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." 
+ + if state.local_process_index == 0: + print("Shuffled central dataloader passing.") + + +def custom_sampler_check(): + state = AcceleratorState() + + class CustomDataset(Dataset): + def __init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __getitem__(self, index): + return self.data[index] + + class CustomBatchSampler: + def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True): + self.batch_size = batch_size + self.data_index = np.arange(dataset_length) + self.shuffle = shuffle + + def __iter__(self): + num_batches = len(self) + if self.shuffle: + index = np.random.permutation(self.data_index) + else: + index = self.data_index + output = np.array_split(index, num_batches) + yield from output + + def __len__(self): + return math.ceil(len(self.data_index) / self.batch_size) + + dataset = CustomDataset(range(32 * state.num_processes)) + sampler = CustomBatchSampler(len(dataset), batch_size=8) + dl = DataLoader(dataset, batch_sampler=sampler) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index) + # We just need to ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler`) is indeed the old batch sampler + if hasattr(dl.batch_sampler, "batch_sampler"): + assert isinstance( + dl.batch_sampler.batch_sampler, CustomBatchSampler + ), "Custom sampler was changed after calling `prepare_data_loader`" + else: + assert isinstance( + dl.batch_sampler, CustomBatchSampler + ), "Custom sampler was changed after calling `prepare_data_loader`" + + +def check_seedable_sampler(): + # Set seed + set_seed(42) + train_set = RegressionDataset(length=10, seed=42) + train_dl = DataLoader(train_set, batch_size=2, shuffle=True) + + config = DataLoaderConfiguration(use_seedable_sampler=True) + accelerator = Accelerator(dataloader_config=config) + train_dl = accelerator.prepare(train_dl) + original_items = [] + for _ in range(3): + for batch in train_dl: + original_items.append(batch["x"]) + original_items = torch.cat(original_items) + + # Set the seed again and set the epoch + set_seed(42) + train_dl.set_epoch(0) + new_items = [] + for _ in range(3): + for batch in train_dl: + new_items.append(batch["x"]) + new_items = torch.cat(new_items) + assert torch.allclose(original_items, new_items), "Did not obtain the same items with the same seed and epoch." + + +def check_seedable_sampler_in_batch_sampler_shard(): + set_seed(42) + + config = DataLoaderConfiguration(use_seedable_sampler=True) + accelerator = Accelerator(dataloader_config=config) + assert accelerator.num_processes > 1, "This test requires more than one process." + + dataloader = DataLoader(list(range(10)), batch_size=1, shuffle=True) + prepared_data_loader = prepare_data_loader( + dataloader=dataloader, + use_seedable_sampler=True, + ) + + target_sampler = prepared_data_loader.batch_sampler.batch_sampler.sampler + assert isinstance( + target_sampler, SeedableRandomSampler + ), "Sampler in BatchSamplerShard is not SeedableRandomSampler."
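+ + +# The helpers below build a single-process training baseline (mock_training) and then re-run the same loop +# through Accelerator.prepare (training_check), asserting that the trained weights match the baseline.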
+ + +def mock_training(length, batch_size, generator, use_seedable_sampler=False): + set_seed(42) + generator.manual_seed(42) + train_set = RegressionDataset(length=length, seed=42) + + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + for epoch in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + loss.backward() + optimizer.step() + return train_set, model + + +def training_check(use_seedable_sampler=False): + state = AcceleratorState() + generator = torch.Generator() + batch_size = 8 + length = batch_size * 4 * state.num_processes + + train_set, old_model = mock_training(length, batch_size * state.num_processes, generator, use_seedable_sampler) + assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes." + assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes." + + accelerator = Accelerator() + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.") + + dataloader_config = DataLoaderConfiguration(split_batches=True, use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader( + train_set, generator, batch_size * state.num_processes, use_seedable_sampler + ) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." 
+ + accelerator.print("Training yielded the same results on one CPU or distributes setup with batch split.") + + if torch.cuda.is_available() or is_npu_available() or is_mlu_available(): + # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16 + print("FP16 training check.") + AcceleratorState._reset_state() + dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(mixed_precision="fp16", dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + if torch.cuda.is_available(): + # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True) + print("Keep fp32 wrapper check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="fp16") + + model = torch.nn.Linear(2, 4) + model = accelerator.prepare(model) + model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True) + + # Run forward with fp16 as input. + # When the model is with mixed precision wrapper, no error will be raised. + input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device) + output = model_with_fp32_wrapper(input_tensor) + + # BF16 support is only for CPU + TPU, and some GPU + if is_bf16_available(): + # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16 + print("BF16 training check.") + AcceleratorState._reset_state() + dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(mixed_precision="bf16", dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." 
+ + # IPEX support is only for CPU + if is_ipex_available(): + print("ipex BF16 training check.") + AcceleratorState._reset_state() + dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(mixed_precision="bf16", cpu=True, dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + # XPU support is only for XPU + if is_xpu_available(): + print("xpu BF16 training check.") + AcceleratorState._reset_state() + dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) + accelerator = Accelerator(mixed_precision="bf16", cpu=False, dataloader_config=dataloader_config) + train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on XPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on XPU or distributed training." + + +def test_split_between_processes_dataset(datasets_Dataset): + state = AcceleratorState() + data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes)]) + with state.split_between_processes(data, apply_padding=False) as results: + assert ( + len(results) == 2 + ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" + + data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) + with state.split_between_processes(data, apply_padding=False) as results: + if state.is_last_process: + assert ( + len(results) == 1 + ), f"Last process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}" + else: + assert ( + len(results) == 2 + ), f"One of the intermediate processes did not receive two items. Process index: {state.process_index}; Length: {len(results)}" + + data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) + with state.split_between_processes(data, apply_padding=True) as results: + if state.num_processes == 1: + assert ( + len(results) == 1 + ), f"Single process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}" + else: + assert ( + len(results) == 2 + ), f"Each process did not have two items. 
Process index: {state.process_index}; Length: {len(results)}" + + state.wait_for_everyone() + + +def test_split_between_processes_list(): + state = AcceleratorState() + data = list(range(0, 2 * state.num_processes)) + with state.split_between_processes(data) as results: + assert ( + len(results) == 2 + ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" + + data = list(range(0, (3 * state.num_processes) - 1)) + with state.split_between_processes(data, apply_padding=True) as results: + if state.is_last_process: + # Test that the last process gets the extra item(s) + num_samples_per_device = math.ceil(len(data) / state.num_processes) + assert ( + len(results) == num_samples_per_device + ), f"Last process did not get the extra item(s). Process index: {state.process_index}; Length: {len(results)}" + state.wait_for_everyone() + + +def test_split_between_processes_nested_dict(): + state = AcceleratorState() + a = [1, 2, 3, 4, 5, 6, 7, 8] + b = ["a", "b", "c", "d", "e", "f", "g", "h"] + c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]) + if state.num_processes in (1, 2, 4): + data = {"a": a, "b": b, "c": c} + data_copy = deepcopy(data) + with state.split_between_processes(data) as results: + if state.process_index == 0: + assert results["a"] == data_copy["a"][: 8 // state.num_processes] + elif state.num_processes == 2: + assert results["a"] == data_copy["a"][4:] + elif state.process_index == 3: + # We return a list each time + assert results["a"] == data_copy["a"][-2:], f'Expected: {data_copy["a"][-2]}, Actual: {results["a"]}' + if state.process_index == 0: + assert results["b"] == data_copy["b"][: 8 // state.num_processes] + elif state.num_processes == 2: + assert results["b"] == data_copy["b"][4:] + elif state.process_index == 3: + assert results["b"] == data_copy["b"][-2:] + if state.process_index == 0: + assert torch.allclose( + results["c"], data_copy["c"][: 8 // state.num_processes] + ), f"Did not obtain expected values on process 0, expected `{data['c'][:8 // state.num_processes]}`, received: {results['c']}" + elif state.num_processes == 2: + assert torch.allclose( + results["c"], data_copy["c"][4:] + ), f"Did not obtain expected values on process 2, expected `{data['c'][4:]}`, received: {results['c']}" + elif state.process_index == 3: + assert torch.allclose( + results["c"], data_copy["c"][-2:] + ), f"Did not obtain expected values on process 4, expected `{data['c'][-2:]}`, received: {results['c']}" + + state.wait_for_everyone() + + +def test_split_between_processes_tensor(): + state = AcceleratorState() + if state.num_processes > 1: + data = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device) + with state.split_between_processes(data) as results: + if state.process_index == 0: + assert torch.allclose(results, torch.tensor([0, 1, 2, 3]).to(state.device)) + else: + assert torch.allclose(results, torch.tensor([4, 5, 6, 7]).to(state.device)) + state.wait_for_everyone() + + +def test_trigger(): + accelerator = Accelerator() + # should start with being false + assert accelerator.check_trigger() is False + + # set a breakpoint on the main process + if accelerator.is_main_process: + accelerator.set_trigger() + + # check it's been activated across all processes + # calls `all_reduce` and triggers a sync + assert accelerator.check_trigger() is True + + # check it's been reset after the sync + assert accelerator.check_trigger() is False + + +def test_reinstantiated_state(): + import pytest + + AcceleratorState._reset_state() + simple_model = 
torch.nn.Linear(1, 1) + # First define an accelerator + accelerator = Accelerator() + # Then call `reset_state`, breaking the state existing in the accelerator + AcceleratorState._reset_state() + # Now try and prepare a simple model, should raise the custom error early + with pytest.raises(AttributeError) as cm: + accelerator.prepare(simple_model) + assert "`AcceleratorState` object has no attribute" in str(cm.value.args[0]) + assert "This happens if `AcceleratorState._reset_state()`" in str(cm.value.args[0]) + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Initialization**") + init_state_check() + state.wait_for_everyone() + + if state.distributed_type == DistributedType.MULTI_GPU: + num_processes_per_node = torch.cuda.device_count() + else: + num_processes_per_node = state.num_processes + + # We only run this test on non-multinode + if num_processes_per_node == state.num_processes: + if state.process_index == 0: + print("\n**Test process execution**") + process_execution_check() + + if state.process_index == 0: + print("\n**Test split between processes as a list**") + test_split_between_processes_list() + + if state.process_index == 0: + print("\n**Test split between processes as a dict**") + test_split_between_processes_nested_dict() + + if state.process_index == 0: + print("\n**Test split between processes as a tensor**") + test_split_between_processes_tensor() + + if state.process_index == 0: + print("\n**Test split between processes as a datasets.Dataset**") + if is_datasets_available(): + from datasets import Dataset as datasets_Dataset + + test_split_between_processes_dataset(datasets_Dataset) + else: + print("Skipped because Hugging Face datasets is not available") + + if state.local_process_index == 0: + print("\n**Test random number generator synchronization**") + rng_sync_check() + + if state.local_process_index == 0: + print("\n**DataLoader integration test**") + dl_preparation_check() + if state.distributed_type != DistributedType.XLA: + central_dl_preparation_check() + custom_sampler_check() + check_seedable_sampler() + + if state.num_processes > 1: + check_seedable_sampler_in_batch_sampler_shard() + + # Trainings are not exactly the same in DeepSpeed and CPU mode + if state.distributed_type == DistributedType.DEEPSPEED: + return + + if state.local_process_index == 0: + print("\n**Training integration test**") + training_check(use_seedable_sampler=False) + training_check(use_seedable_sampler=True) + + if state.local_process_index == 0: + print("\n**Breakpoint trigger test**") + test_trigger() + + if is_pytest_available(): + if state.local_process_index == 0: + print("\n**Test reinstantiated state**") + test_reinstantiated_state() + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..bd458bcab8aaa42409a7c1234a4afffb087e8a7c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py @@ -0,0 +1,392 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy + +import torch +import torch.nn.functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from torch.utils.data import DataLoader + +from accelerate.accelerator import Accelerator, GradientAccumulationPlugin +from accelerate.state import GradientState +from accelerate.test_utils import RegressionDataset, RegressionModel +from accelerate.utils import DistributedType, set_seed + + +def check_model_parameters(model_a, model_b, did_step, iteration, **kwargs): + for param, grad_param in zip(model_a.parameters(), model_b.parameters()): + if not param.requires_grad: + continue + if not did_step: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, grad_param.grad, **kwargs) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, grad_param.grad, **kwargs) is True + ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" + + +def step_model(model, input, target, accelerator, do_backward=True): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + if not do_backward: + loss /= accelerator.gradient_accumulation_steps + loss.backward() + else: + accelerator.backward(loss) + + +def get_training_setup(accelerator, sched=False): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=80) + dataloader = DataLoader(dset, batch_size=16) + model.to(accelerator.device) + if sched: + opt = AdamW(params=model.parameters(), lr=1e-3) + ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3) + sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65) + ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65) + # Make a copy of `model` + if sched: + ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader) + else: + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + if sched: + return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) + return model, ddp_model, dataloader + + +def test_noop_sync(accelerator): + # Test when on a single CPU or GPU that the context manager does nothing + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, 
accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync + check_model_parameters(model, ddp_model, True, iteration) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + assert torch.allclose( + param.grad, ddp_param.grad + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync(accelerator): + # Test on distributed setup that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if iteration % 2 == 0: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync_multiple_fwd(accelerator): + # Test on distributed setup that context manager behaves properly when used with multiple forwards followed by multiple backwards + model, ddp_model, dataloader = get_training_setup(accelerator) + # Do multiple forwards + losses = [] + num_iterations = 3 + for iteration in range(num_iterations): + ddp_input, ddp_target = next(iter(dataloader)).values() + + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + ddp_output = ddp_model(ddp_input) + loss = F.mse_loss(ddp_output, ddp_target.to(ddp_output.device)) + losses.append(loss) + + # Do multiple backwards and sync only at the last backward + for iteration in range(num_iterations): + loss = losses[iteration] + + if iteration < num_iterations - 1: + # Accumulate grads locally + accelerator.backward(loss) + + # DDP model and model should only be in sync after last backward + for param, ddp_param in 
zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + + else: + # Sync grads if last backward + with accelerator.trigger_sync_in_backward(ddp_model): + accelerator.backward(loss) + + # DDP model and model should only be in sync after last backward + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + +def test_gradient_accumulation(split_batches=False, dispatch_batches=False, sync_each_batch=False): + gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) + accelerator = Accelerator( + split_batches=split_batches, + dispatch_batches=dispatch_batches, + gradient_accumulation_plugin=gradient_accumulation_plugin, + ) + # Test that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator, False) + # Do "gradient accumulation" (noop) + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1) or sync_each_batch: + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + else: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + GradientState._reset_state() + + +def test_gradient_accumulation_with_opt_and_scheduler( + split_batches=False, dispatch_batches=False, sync_each_batch=False +): + gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) + accelerator = Accelerator( + split_batches=split_batches, + dispatch_batches=dispatch_batches, + gradient_accumulation_plugin=gradient_accumulation_plugin, + ) + # Test that context manager behaves properly + model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = 
input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + model.train() + ddp_model.train() + step_model(model, input, target, accelerator, False) + opt.step() + + if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch: + if split_batches: + sched.step() + else: + for _ in range(accelerator.num_processes): + sched.step() + + # Perform gradient accumulation under wrapper + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + ddp_opt.step() + ddp_sched.step() + + # Learning rates should be the same + assert ( + opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] + ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' + did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch + if accelerator.num_processes > 1: + check_model_parameters( + model, + ddp_model, + did_step, + iteration, + rtol=1e-3, # somehow needs a relative tolerance + ) + + if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch: + opt.zero_grad() # needs to be guarded by logic as to when we should zero grads + ddp_opt.zero_grad() + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + GradientState._reset_state() + + +def test_dataloader_break(): + accelerator = Accelerator() + + first_dset = RegressionDataset(length=80) + first_dataloader = DataLoader(first_dset, batch_size=16) + second_dset = RegressionDataset(length=96) + second_dataloader = DataLoader(second_dset, batch_size=16) + first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader) + assert accelerator.gradient_state.active_dataloader is None + for iteration, _ in enumerate(first_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader) + if iteration < len(first_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + if iteration == 1: + for batch_num, _ in enumerate(second_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader) + if batch_num < len(second_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + assert accelerator.gradient_state.active_dataloader is None + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Test `accumulate` gradient accumulation with dataloader break**") + if state.distributed_type != DistributedType.XLA: + test_dataloader_break() + if state.distributed_type == DistributedType.NO: + if state.local_process_index == 0: + print("**Test NOOP `no_sync` context manager**") + test_noop_sync(accelerator) + if state.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_CPU, + ): + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager**") + test_distributed_sync(accelerator) + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager with multiple forwards**") + test_distributed_sync_multiple_fwd(accelerator) + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU): + for 
split_batch in [True, False]: + for dispatch_batches in [True, False]: + for sync_each_batch in [True, False]: + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**", + ) + test_gradient_accumulation(split_batch, dispatch_batches, sync_each_batch) + + # Currently will break on torch 2.0 +, need to investigate why + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + "`split_batches=False`, `dispatch_batches=False`, `sync_each_batch=False`**", + ) + test_gradient_accumulation_with_opt_and_scheduler() + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU): + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + for sync_each_batch in [True, False]: + if not split_batch and not dispatch_batches and not sync_each_batch: + continue + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**", + ) + test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches, sync_each_batch) + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/testing.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..bbcdbdf754179e6b3bfda38011de41c25661162a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/testing.py @@ -0,0 +1,621 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
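The test_sync.py script above verifies that `accelerator.accumulate` only synchronizes gradients on accumulation boundaries (every `num_steps` batches, or on the last batch of the dataloader). For reference, a minimal sketch of the training pattern those tests exercise — this is illustrative only and not part of the accelerate sources; the model, data, and hyper-parameters are placeholders:

```python
# Minimal gradient-accumulation sketch mirroring what test_gradient_accumulation checks.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Sync gradients only every 2 batches, as in the tests above.
accelerator = Accelerator(
    gradient_accumulation_plugin=GradientAccumulationPlugin(num_steps=2)
)

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 1), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=16)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # Inside `accumulate`, backward skips the gradient all-reduce on non-sync steps,
    # and the wrapped optimizer/zero_grad are effectively no-ops until a sync step.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```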
+ +import asyncio +import inspect +import os +import shutil +import subprocess +import sys +import tempfile +import unittest +from contextlib import contextmanager +from functools import partial +from pathlib import Path +from typing import List, Union +from unittest import mock + +import torch + +import accelerate + +from ..state import AcceleratorState, PartialState +from ..utils import ( + gather, + is_bnb_available, + is_clearml_available, + is_comet_ml_available, + is_cuda_available, + is_datasets_available, + is_deepspeed_available, + is_dvclive_available, + is_mlu_available, + is_mps_available, + is_npu_available, + is_pandas_available, + is_pippy_available, + is_schedulefree_available, + is_tensorboard_available, + is_timm_available, + is_torch_version, + is_torch_xla_available, + is_torchvision_available, + is_transformers_available, + is_wandb_available, + is_xpu_available, + str_to_bool, +) + + +def get_backend(): + if is_torch_xla_available(): + return "xla", torch.cuda.device_count(), torch.cuda.memory_allocated + elif is_cuda_available(): + return "cuda", torch.cuda.device_count(), torch.cuda.memory_allocated + elif is_mps_available(): + return "mps", 1, torch.mps.current_allocated_memory() + elif is_mlu_available(): + return "mlu", torch.mlu.device_count(), torch.mlu.memory_allocated + elif is_npu_available(): + return "npu", torch.npu.device_count(), torch.npu.memory_allocated + elif is_xpu_available(): + return "xpu", torch.xpu.device_count(), torch.xpu.memory_allocated + else: + return "cpu", 1, 0 + + +torch_device, device_count, memory_allocated_func = get_backend() + + +def get_launch_command(**kwargs) -> list: + """ + Wraps around `kwargs` to help simplify launching from `subprocess`. + + Example: + ```python + # returns ['accelerate', 'launch', '--num_processes=2', '--device_count=2'] + get_launch_command(num_processes=2, device_count=2) + ``` + """ + command = ["accelerate", "launch"] + for k, v in kwargs.items(): + if isinstance(v, bool) and v: + command.append(f"--{k}") + elif v is not None: + command.append(f"--{k}={v}") + return command + + +DEFAULT_LAUNCH_COMMAND = get_launch_command(num_processes=device_count) + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) + + +def skip(test_case): + "Decorator that skips a test unconditionally" + return unittest.skip("Test was skipped")(test_case) + + +def slow(test_case): + """ + Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a + truthy value to run them. + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def require_cpu(test_case): + """ + Decorator marking a test that must be only ran on the CPU. These tests are skipped when a GPU is available. + """ + return unittest.skipUnless(torch_device == "cpu", "test requires only a CPU")(test_case) + + +def require_non_cpu(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no + hardware accelerator available. 
+ """ + return unittest.skipUnless(torch_device != "cpu", "test requires a GPU")(test_case) + + +def require_cuda(test_case): + """ + Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available or when + TorchXLA is available. + """ + return unittest.skipUnless(is_cuda_available() and not is_torch_xla_available(), "test requires a GPU")(test_case) + + +def require_xpu(test_case): + """ + Decorator marking a test that requires XPU. These tests are skipped when there are no XPU available. + """ + return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case) + + +def require_non_xpu(test_case): + """ + Decorator marking a test that should be skipped for XPU. + """ + return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case) + + +def require_mlu(test_case): + """ + Decorator marking a test that requires MLU. These tests are skipped when there are no MLU available. + """ + return unittest.skipUnless(is_mlu_available(), "test require a MLU")(test_case) + + +def require_npu(test_case): + """ + Decorator marking a test that requires NPU. These tests are skipped when there are no NPU available. + """ + return unittest.skipUnless(is_npu_available(), "test require a NPU")(test_case) + + +def require_mps(test_case): + """ + Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps` + backend. + """ + return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case) + + +def require_huggingface_suite(test_case): + """ + Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not. + """ + return unittest.skipUnless( + is_transformers_available() and is_datasets_available(), + "test requires the Hugging Face suite", + )(test_case) + + +def require_transformers(test_case): + """ + Decorator marking a test that requires transformers. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires transformers. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case) + + +def require_torchvision(test_case): + """ + Decorator marking a test that requires torchvision. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_torchvision_available(), "test requires the torchvision library")(test_case) + + +def require_schedulefree(test_case): + """ + Decorator marking a test that requires schedulefree. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_schedulefree_available(), "test requires the schedulefree library")(test_case) + + +def require_bnb(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case) + + +def require_tpu(test_case): + """ + Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available. + """ + return unittest.skipUnless(is_torch_xla_available(check_is_tpu=True), "test requires TPU")(test_case) + + +def require_non_torch_xla(test_case): + """ + Decorator marking a test as requiring an environment without TorchXLA. 
These tests are skipped when TorchXLA is + available. + """ + return unittest.skipUnless(not is_torch_xla_available(), "test requires an env without TorchXLA")(test_case) + + +def require_single_device(test_case): + """ + Decorator marking a test that requires a single device. These tests are skipped when there is no hardware + accelerator available or number of devices is more than one. + """ + return unittest.skipUnless(torch_device != "cpu" and device_count == 1, "test requires a hardware accelerator")( + test_case + ) + + +def require_single_gpu(test_case): + """ + Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there are no GPU + available or number of GPUs is more than one. + """ + return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case) + + +def require_single_xpu(test_case): + """ + Decorator marking a test that requires CUDA on a single XPU. These tests are skipped when there are no XPU + available or number of xPUs is more than one. + """ + return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case) + + +def require_multi_device(test_case): + """ + Decorator marking a test that requires a multi-device setup. These tests are skipped on a machine without multiple + devices. + """ + return unittest.skipUnless(device_count > 1, "test requires multiple hardware accelerators")(test_case) + + +def require_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple + GPUs. + """ + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_multi_xpu(test_case): + """ + Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without multiple + XPUs. + """ + return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case) + + +def require_deepspeed(test_case): + """ + Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed + """ + return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case) + + +def require_fsdp(test_case): + """ + Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed + """ + return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case) + + +def require_torch_min_version(test_case=None, version=None): + """ + Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an + installed torch version is less than the required one. + """ + if test_case is None: + return partial(require_torch_min_version, version=version) + return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case) + + +def require_tensorboard(test_case): + """ + Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't + installed + """ + return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case) + + +def require_wandb(test_case): + """ + Decorator marking a test that requires wandb installed. 
These tests are skipped when wandb isn't installed + """ + return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case) + + +def require_comet_ml(test_case): + """ + Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed + """ + return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case) + + +def require_clearml(test_case): + """ + Decorator marking a test that requires clearml installed. These tests are skipped when clearml isn't installed + """ + return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case) + + +def require_dvclive(test_case): + """ + Decorator marking a test that requires dvclive installed. These tests are skipped when dvclive isn't installed + """ + return unittest.skipUnless(is_dvclive_available(), "test requires dvclive")(test_case) + + +def require_pandas(test_case): + """ + Decorator marking a test that requires pandas installed. These tests are skipped when pandas isn't installed + """ + return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case) + + +def require_pippy(test_case): + """ + Decorator marking a test that requires pippy installed. These tests are skipped when pippy isn't installed + """ + return unittest.skipUnless(is_pippy_available(), "test requires pippy")(test_case) + + +_atleast_one_tracker_available = ( + any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() +) + + +def require_trackers(test_case): + """ + Decorator marking that a test requires at least one tracking library installed. These tests are skipped when none + are installed + """ + return unittest.skipUnless( + _atleast_one_tracker_available, + "test requires at least one tracker to be available and for `comet_ml` to not be installed", + )(test_case) + + +class TempDirTestCase(unittest.TestCase): + """ + A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its + data at the start of a test, and then destroyes it at the end of the TestCase. + + Useful for when a class or API requires a single constant folder throughout it's use, such as Weights and Biases + + The temporary directory location will be stored in `self.tmpdir` + """ + + clear_on_setup = True + + @classmethod + def setUpClass(cls): + "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`" + cls.tmpdir = Path(tempfile.mkdtemp()) + + @classmethod + def tearDownClass(cls): + "Remove `cls.tmpdir` after test suite has finished" + if os.path.exists(cls.tmpdir): + shutil.rmtree(cls.tmpdir) + + def setUp(self): + "Destroy all contents in `self.tmpdir`, but not `self.tmpdir`" + if self.clear_on_setup: + for path in self.tmpdir.glob("**/*"): + if path.is_file(): + path.unlink() + elif path.is_dir(): + shutil.rmtree(path) + + +class AccelerateTestCase(unittest.TestCase): + """ + A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or utilizes + the `AcceleratorState` class should inherit from this to avoid silent failures due to state being shared between + tests. + """ + + def tearDown(self): + super().tearDown() + # Reset the state of the AcceleratorState singleton. 
+ AcceleratorState._reset_state() + PartialState._reset_state() + + +class MockingTestCase(unittest.TestCase): + """ + A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the + behavior of a class-wide mock when defining one normally will not do. + + Useful when a mock requires specific information available only initialized after `TestCase.setUpClass`, such as + setting an environment variable with that information. + + The `add_mocks` function should be ran at the end of a `TestCase`'s `setUp` function, after a call to + `super().setUp()` such as: + ```python + def setUp(self): + super().setUp() + mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR", "SOME_VALUE"}) + self.add_mocks(mocks) + ``` + """ + + def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]): + """ + Add custom mocks for tests that should be repeated on each test. Should be called during + `MockingTestCase.setUp`, after `super().setUp()`. + + Args: + mocks (`mock.Mock` or list of `mock.Mock`): + Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run + """ + self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks] + for m in self.mocks: + m.start() + self.addCleanup(m.stop) + + +def are_the_same_tensors(tensor): + state = AcceleratorState() + tensor = tensor[None].clone().to(state.device) + tensors = gather(tensor).cpu() + tensor = tensor[0].cpu() + for i in range(tensors.shape[0]): + if not torch.equal(tensors[i], tensor): + return False + return True + + +class _RunOutput: + def __init__(self, returncode, stdout, stderr): + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + +async def _read_stream(stream, callback): + while True: + line = await stream.readline() + if line: + callback(line) + else: + break + + +async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: + if echo: + print("\nRunning: ", " ".join(cmd)) + + p = await asyncio.create_subprocess_exec( + cmd[0], + *cmd[1:], + stdin=stdin, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=env, + ) + + # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe + # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait + # + # If it starts hanging, will need to switch to the following code. The problem is that no data + # will be seen until it's done and if it hangs for example there will be no debug info. 
+ # out, err = await p.communicate() + # return _RunOutput(p.returncode, out, err) + + out = [] + err = [] + + def tee(line, sink, pipe, label=""): + line = line.decode("utf-8").rstrip() + sink.append(line) + if not quiet: + print(label, line, file=pipe) + + # XXX: the timeout doesn't seem to make any difference here + await asyncio.wait( + [ + asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))), + asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))), + ], + timeout=timeout, + ) + return _RunOutput(await p.wait(), out, err) + + +def execute_subprocess_async(cmd: list, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: + # Cast every path in `cmd` to a string + for i, c in enumerate(cmd): + if isinstance(c, Path): + cmd[i] = str(c) + loop = asyncio.get_event_loop() + result = loop.run_until_complete( + _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) + ) + + cmd_str = " ".join(cmd) + if result.returncode > 0: + stderr = "\n".join(result.stderr) + raise RuntimeError( + f"'{cmd_str}' failed with returncode {result.returncode}\n\n" + f"The combined stderr from workers follows:\n{stderr}" + ) + + return result + + +class SubprocessCallException(Exception): + pass + + +def run_command(command: List[str], return_stdout=False, env=None): + """ + Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture + if an error occured while running `command` + """ + # Cast every path in `command` to a string + for i, c in enumerate(command): + if isinstance(c, Path): + command[i] = str(c) + if env is None: + env = os.environ.copy() + try: + output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env) + if return_stdout: + if hasattr(output, "decode"): + output = output.decode("utf-8") + return output + except subprocess.CalledProcessError as e: + raise SubprocessCallException( + f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" + ) from e + + +def path_in_accelerate_package(*components: str) -> Path: + """ + Get a path within the `accelerate` package's directory. + + Args: + *components: Components of the path to join after the package directory. + + Returns: + `Path`: The path to the requested file or directory. + """ + + accelerate_package_dir = Path(inspect.getfile(accelerate)).parent + return accelerate_package_dir.joinpath(*components) + + +@contextmanager +def assert_exception(exception_class: Exception, msg: str = None) -> bool: + """ + Context manager to assert that the right `Exception` class was raised. + + If `msg` is provided, will check that the message is contained in the raised exception. 
+ """ + was_ran = False + try: + yield + was_ran = True + except Exception as e: + assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}" + if msg is not None: + assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'" + if was_ran: + raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.") diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/training.py b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/training.py new file mode 100644 index 0000000000000000000000000000000000000000..d89cfd3c71546871d00cb9c2a5cd07494c46cbfe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/training.py @@ -0,0 +1,101 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import torch +from torch.utils.data import DataLoader + +from accelerate.utils.dataclasses import DistributedType + + +class RegressionDataset: + def __init__(self, a=2, b=3, length=64, seed=None): + rng = np.random.default_rng(seed) + self.length = length + self.x = rng.normal(size=(length,)).astype(np.float32) + self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32) + + def __len__(self): + return self.length + + def __getitem__(self, i): + return {"x": self.x[i], "y": self.y[i]} + + +class RegressionModel4XPU(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.b = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a[0] + self.b[0] + + +class RegressionModel(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor(a).float()) + self.b = torch.nn.Parameter(torch.tensor(b).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a + self.b + + +def mocked_dataloaders(accelerator, batch_size: int = 16): + from datasets import load_dataset + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} + datasets = load_dataset("csv", data_files=data_files) + label_list = datasets["train"].unique("label") + + label_to_id = {v: i for i, v in enumerate(label_list)} + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer( + examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length" + ) + if "label" in examples: + outputs["labels"] = [label_to_id[l] for l in examples["label"]] + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["sentence1", "sentence2", "label"], + ) + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2) + eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1) + + return train_dataloader, eval_dataloader diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b104aee0063f3610c153dbd1bf43a69614bb1e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__init__.py @@ -0,0 +1,229 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
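The `RegressionDataset` and `RegressionModel` helpers defined in training.py above are consumed by the test scripts roughly as follows — a hedged sketch, not part of the accelerate sources, fitting the synthetic y = a*x + b data with a prepared model, optimizer, and dataloader:

```python
# Minimal sketch using the test helpers shipped with accelerate.test_utils.
import torch
from torch.utils.data import DataLoader

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator()
dataset = RegressionDataset(length=96, seed=42)   # yields {"x": ..., "y": ...} items
dataloader = DataLoader(dataset, batch_size=16)
model = RegressionModel()                         # learns scalar parameters a and b
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for epoch in range(3):
    for batch in dataloader:
        loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```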
+from .constants import ( + MODEL_NAME, + OPTIMIZER_NAME, + RNG_STATE_NAME, + SAFE_MODEL_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + SAMPLER_NAME, + SCALER_NAME, + SCHEDULER_NAME, + TORCH_DISTRIBUTED_OPERATION_TYPES, + TORCH_LAUNCH_PARAMS, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, +) +from .dataclasses import ( + AutocastKwargs, + BnbQuantizationConfig, + ComputeEnvironment, + CustomDtype, + DataLoaderConfiguration, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + DynamoBackend, + FP8RecipeKwargs, + FullyShardedDataParallelPlugin, + GradientAccumulationPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + KwargsHandler, + LoggerType, + MegatronLMPlugin, + PrecisionType, + ProjectConfiguration, + RNGType, + SageMakerDistributedType, + TensorInformation, + TorchDynamoPlugin, +) +from .environment import ( + are_libraries_initialized, + check_cuda_p2p_ib_support, + check_fp8_capability, + convert_dict_to_env_variables, + get_cpu_distributed_information, + get_gpu_info, + get_int_from_env, + parse_choice_from_env, + parse_flag_from_env, + set_numa_affinity, + str_to_bool, +) +from .imports import ( + get_ccl_version, + is_4bit_bnb_available, + is_8bit_bnb_available, + is_aim_available, + is_bf16_available, + is_bnb_available, + is_boto3_available, + is_ccl_available, + is_clearml_available, + is_comet_ml_available, + is_cuda_available, + is_datasets_available, + is_deepspeed_available, + is_dvclive_available, + is_fp8_available, + is_ipex_available, + is_lomo_available, + is_megatron_lm_available, + is_mlflow_available, + is_mlu_available, + is_mps_available, + is_msamp_available, + is_npu_available, + is_pandas_available, + is_peft_available, + is_pippy_available, + is_pynvml_available, + is_pytest_available, + is_rich_available, + is_sagemaker_available, + is_schedulefree_available, + is_tensorboard_available, + is_timm_available, + is_torch_xla_available, + is_torchvision_available, + is_transformer_engine_available, + is_transformers_available, + is_wandb_available, + is_xpu_available, +) +from .modeling import ( + calculate_maximum_sizes, + check_device_map, + check_tied_parameters_in_config, + check_tied_parameters_on_same_device, + compute_module_sizes, + convert_file_size_to_int, + dtype_byte_size, + find_tied_parameters, + get_balanced_memory, + get_max_layer_size, + get_max_memory, + get_mixed_precision_context_manager, + id_tensor_storage, + infer_auto_device_map, + is_peft_model, + load_checkpoint_in_model, + load_offloaded_weights, + load_state_dict, + named_module_tensors, + retie_parameters, + set_module_tensor_to_device, + shard_checkpoint, +) +from .offload import ( + OffloadedWeightsLoader, + PrefixedDataset, + extract_submodules_state_dict, + load_offloaded_weight, + offload_state_dict, + offload_weight, + save_offload_index, +) +from .operations import ( + CannotPadNestedTensorWarning, + broadcast, + broadcast_object_list, + concatenate, + convert_outputs_to_fp32, + convert_to_fp32, + copy_tensor_to_devices, + find_batch_size, + find_device, + gather, + gather_object, + get_data_structure, + honor_type, + ignorant_find_batch_size, + initialize_tensors, + is_namedtuple, + is_tensor_information, + is_torch_tensor, + listify, + pad_across_processes, + pad_input_tensors, + recursively_apply, + reduce, + send_to_device, + slice_tensors, +) +from .versions import compare_versions, is_torch_version + + +if is_deepspeed_available(): + from .deepspeed import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + 
DummyOptim, + DummyScheduler, + HfDeepSpeedConfig, + ) + +from .bnb import has_4bit_bnb_layers, load_and_quantize_model +from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer +from .launch import ( + PrepareForLaunch, + _filter_args, + prepare_deepspeed_cmd_env, + prepare_multi_gpu_env, + prepare_sagemager_args_inputs, + prepare_simple_launcher_cmd_env, + prepare_tpu, +) +from .megatron_lm import ( + AbstractTrainStep, + BertTrainStep, + GPTTrainStep, + MegatronEngine, + MegatronLMDummyDataLoader, + MegatronLMDummyScheduler, + MegatronLMOptimizerWrapper, + MegatronLMSchedulerWrapper, + T5TrainStep, + avg_losses_across_data_parallel_group, + gather_across_data_parallel_groups, +) +from .megatron_lm import initialize as megatron_lm_initialize +from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader +from .megatron_lm import prepare_model as megatron_lm_prepare_model +from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer +from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler +from .memory import find_executable_batch_size, release_memory +from .other import ( + check_os_kernel, + clean_state_dict_for_safetensors, + clear_environment, + convert_bytes, + extract_model_from_parallel, + get_pretty_name, + is_port_in_use, + merge_dicts, + patch_environment, + recursive_getattr, + save, + wait_for_everyone, + write_basic_config, +) +from .random import set_seed, synchronize_rng_state, synchronize_rng_states +from .torch_xla import install_xla +from .tqdm import tqdm +from .transformer_engine import convert_model, has_transformer_engine_layers diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdaf8866ce8c5a9fdc86c8dec71da308c3be47f7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/deepspeed.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23d436b982f79f9ae4124f78b26b61b866617d1e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/launch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d484dc60853babb5ea375bcff127d8411bd15505 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/other.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bac6b4a69441eae7c0cf850de67e3a88d95c631a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/random.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c76d611623c117c43837764f1df78a201c3e954d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/tqdm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/bnb.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/bnb.py new file mode 100644 index 0000000000000000000000000000000000000000..284ee5df6e89171948745255dd33a3b2b91123a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/bnb.py @@ -0,0 +1,467 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +import os +from copy import deepcopy +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn + +from accelerate.utils.imports import ( + is_4bit_bnb_available, + is_8bit_bnb_available, +) + +from ..big_modeling import dispatch_model, init_empty_weights +from .dataclasses import BnbQuantizationConfig +from .modeling import ( + find_tied_parameters, + get_balanced_memory, + infer_auto_device_map, + load_checkpoint_in_model, + offload_weight, + set_module_tensor_to_device, +) + + +logger = logging.getLogger(__name__) + + +def load_and_quantize_model( + model: torch.nn.Module, + bnb_quantization_config: BnbQuantizationConfig, + weights_location: Union[str, os.PathLike] = None, + device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, + no_split_module_classes: Optional[List[str]] = None, + max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = None, + offload_state_dict: bool = False, +): + """ + This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the + model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the + model is already loaded, we will quantize the model and put the model on the GPU, + + Args: + model (`torch.nn.Module`): + Input model. The model can be already loaded or on the meta device + bnb_quantization_config (`BnbQuantizationConfig`): + The bitsandbytes quantization parameters + weights_location (`str` or `os.PathLike`): + The folder weights_location to load. It can be: + - a path to a file containing a whole model state dict + - a path to a `.json` file containing the index to a sharded checkpoint + - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. + - a path to a folder containing a unique pytorch_model.bin file. + device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer + name, once a given module name is inside, every submodule of it will be sent to the same device. 
+ no_split_module_classes (`List[str]`, *optional*): + A list of layer class names that should never be split across device (for instance any layer that has a + residual connection). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_state_dict (`bool`, *optional*, defaults to `False`): + If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if + the weight of the CPU state dict + the biggest shard does not fit. + + Returns: + `torch.nn.Module`: The quantized model + """ + + load_in_4bit = bnb_quantization_config.load_in_4bit + load_in_8bit = bnb_quantization_config.load_in_8bit + + if load_in_8bit and not is_8bit_bnb_available(): + raise ImportError( + "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," + " make sure you have the latest version of `bitsandbytes` installed." + ) + if load_in_4bit and not is_4bit_bnb_available(): + raise ValueError( + "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," + "make sure you have the latest version of `bitsandbytes` installed." + ) + + modules_on_cpu = [] + # custom device map + if isinstance(device_map, dict) and len(device_map.keys()) > 1: + modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] + + # We keep some modules such as the lm_head in their original dtype for numerical stability reasons + if bnb_quantization_config.skip_modules is None: + bnb_quantization_config.skip_modules = get_keys_to_not_convert(model) + + # add cpu modules to skip modules only for 4-bit modules + if load_in_4bit: + bnb_quantization_config.skip_modules.extend(modules_on_cpu) + modules_to_not_convert = bnb_quantization_config.skip_modules + + # We add the modules we want to keep in full precision + if bnb_quantization_config.keep_in_fp32_modules is None: + bnb_quantization_config.keep_in_fp32_modules = [] + keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules + modules_to_not_convert.extend(keep_in_fp32_modules) + + # compatibility with peft + model.is_loaded_in_4bit = load_in_4bit + model.is_loaded_in_8bit = load_in_8bit + + model_device = get_parameter_device(model) + if model_device.type != "meta": + # quantization of an already loaded model + logger.warning( + "It is not recommended to quantize a loaded model. " + "The model should be instantiated under the `init_empty_weights` context manager." 
+ ) + model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert) + # convert param to the right dtype + dtype = bnb_quantization_config.torch_dtype + for name, param in model.state_dict().items(): + if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules): + param.to(torch.float32) + if param.dtype != torch.float32: + name = name.replace(".weight", "").replace(".bias", "") + param = getattr(model, name, None) + if param is not None: + param.to(torch.float32) + elif torch.is_floating_point(param): + param.to(dtype) + if model_device.type == "cuda": + # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda + model.cuda(torch.cuda.current_device()) + torch.cuda.empty_cache() + elif torch.cuda.is_available(): + model.to(torch.cuda.current_device()) + else: + raise RuntimeError("No GPU found. A GPU is needed for quantization.") + logger.info( + f"The model device type is {model_device.type}. However, cuda is needed for quantization." + "We move the model to cuda." + ) + return model + + elif weights_location is None: + raise RuntimeError( + f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " + ) + + else: + with init_empty_weights(): + model = replace_with_bnb_layers( + model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert + ) + + device_map = get_quantized_model_device_map( + model, + bnb_quantization_config, + device_map, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + ) + if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): + offload_state_dict = True + + offload = any(x in list(device_map.values()) for x in ["cpu", "disk"]) + + load_checkpoint_in_model( + model, + weights_location, + device_map, + dtype=bnb_quantization_config.torch_dtype, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, + offload_8bit_bnb=load_in_8bit and offload, + ) + return dispatch_model(model, device_map=device_map, offload_dir=offload_folder) + + +def get_quantized_model_device_map( + model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None +): + if device_map is None: + if torch.cuda.is_available(): + device_map = {"": torch.cuda.current_device()} + else: + raise RuntimeError("No GPU found. A GPU is needed for quantization.") + logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.") + + if isinstance(device_map, str): + if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + raise ValueError( + "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " + "'sequential'." + ) + + special_dtypes = {} + special_dtypes.update( + { + name: bnb_quantization_config.torch_dtype + for name, _ in model.named_parameters() + if any(m in name for m in bnb_quantization_config.skip_modules) + } + ) + special_dtypes.update( + { + name: torch.float32 + for name, _ in model.named_parameters() + if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules) + } + ) + + kwargs = {} + kwargs["special_dtypes"] = special_dtypes + kwargs["no_split_module_classes"] = no_split_module_classes + kwargs["dtype"] = bnb_quantization_config.target_dtype + + # get max_memory for each device. 
+ if device_map != "sequential": + max_memory = get_balanced_memory( + model, + low_zero=(device_map == "balanced_low_0"), + max_memory=max_memory, + **kwargs, + ) + + kwargs["max_memory"] = max_memory + device_map = infer_auto_device_map(model, **kwargs) + + if isinstance(device_map, dict): + # check if don't have any quantized module on the cpu + modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules + + device_map_without_some_modules = { + key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert + } + for device in ["cpu", "disk"]: + if device in device_map_without_some_modules.values(): + if bnb_quantization_config.load_in_4bit: + raise ValueError( + """ + Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit + the quantized model. If you want to dispatch the model on the CPU or the disk while keeping + these modules in `torch_dtype`, you need to pass a custom `device_map` to + `load_and_quantize_model`. Check + https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk + for more details. + """ + ) + else: + logger.info( + "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" + ) + del device_map_without_some_modules + return device_map + + +def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None): + """ + A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit` + modules from the `bitsandbytes`library. The function will be run recursively and replace `torch.nn.Linear` modules. + + Parameters: + model (`torch.nn.Module`): + Input model or `torch.nn.Module` as the function is run recursively. + modules_to_not_convert (`List[str]`): + Names of the modules to not quantize convert. In practice we keep the `lm_head` in full precision for + numerical stability reasons. + current_key_name (`List[str]`, *optional*): + An array to track the current key of the recursion. This is used to check whether the current key (part of + it) is not in the list of modules to not convert. + """ + + if modules_to_not_convert is None: + modules_to_not_convert = [] + + model, has_been_replaced = _replace_with_bnb_layers( + model, bnb_quantization_config, modules_to_not_convert, current_key_name + ) + if not has_been_replaced: + logger.warning( + "You are loading your model in 8bit or 4bit but no linear modules were found in your model." + " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." + " Please double check your model architecture, or submit an issue on github if you think this is" + " a bug." + ) + return model + + +def _replace_with_bnb_layers( + model, + bnb_quantization_config, + modules_to_not_convert=None, + current_key_name=None, +): + """ + Private method that wraps the recursion for module replacement. + + Returns the converted model and a boolean that indicates if the conversion has been successfull or not. 
+ """ + # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily + import bitsandbytes as bnb + + has_been_replaced = False + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + if isinstance(module, nn.Linear) and name not in modules_to_not_convert: + # Check if the current key is not in the `modules_to_not_convert` + current_key_name_str = ".".join(current_key_name) + proceed = True + for key in modules_to_not_convert: + if ( + (key in current_key_name_str) and (key + "." in current_key_name_str) + ) or key == current_key_name_str: + proceed = False + break + if proceed: + # Load bnb module with empty weight and replace ``nn.Linear` module + if bnb_quantization_config.load_in_8bit: + bnb_module = bnb.nn.Linear8bitLt( + module.in_features, + module.out_features, + module.bias is not None, + has_fp16_weights=False, + threshold=bnb_quantization_config.llm_int8_threshold, + ) + elif bnb_quantization_config.load_in_4bit: + bnb_module = bnb.nn.Linear4bit( + module.in_features, + module.out_features, + module.bias is not None, + bnb_quantization_config.bnb_4bit_compute_dtype, + compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, + quant_type=bnb_quantization_config.bnb_4bit_quant_type, + ) + else: + raise ValueError("load_in_8bit and load_in_4bit can't be both False") + bnb_module.weight.data = module.weight.data + if module.bias is not None: + bnb_module.bias.data = module.bias.data + bnb_module.requires_grad_(False) + setattr(model, name, bnb_module) + has_been_replaced = True + if len(list(module.children())) > 0: + _, _has_been_replaced = _replace_with_bnb_layers( + module, bnb_quantization_config, modules_to_not_convert, current_key_name + ) + has_been_replaced = has_been_replaced | _has_been_replaced + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced + + +def get_keys_to_not_convert(model): + r""" + An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules + we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want + to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in + int8. + + Parameters: + model (`torch.nn.Module`): + Input model + """ + # Create a copy of the model + with init_empty_weights(): + tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` + + tied_params = find_tied_parameters(tied_model) + # For compatibility with Accelerate < 0.18 + if isinstance(tied_params, dict): + tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) + else: + tied_keys = sum(tied_params, []) + has_tied_params = len(tied_keys) > 0 + + # Check if it is a base model + is_base_model = False + if hasattr(model, "base_model_prefix"): + is_base_model = not hasattr(model, model.base_model_prefix) + + # Ignore this for base models (BertModel, GPT2Model, etc.) 
+ if (not has_tied_params) and is_base_model: + return [] + + # otherwise they have an attached head + list_modules = list(model.named_children()) + list_last_module = [list_modules[-1][0]] + + # add last module together with tied weights + intersection = set(list_last_module) - set(tied_keys) + list_untouched = list(set(tied_keys)) + list(intersection) + + # remove ".weight" from the keys + names_to_remove = [".weight", ".bias"] + filtered_module_names = [] + for name in list_untouched: + for name_to_remove in names_to_remove: + if name_to_remove in name: + name = name.replace(name_to_remove, "") + filtered_module_names.append(name) + + return filtered_module_names + + +def has_4bit_bnb_layers(model): + """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model""" + # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily + import bitsandbytes as bnb + + for m in model.modules(): + if isinstance(m, bnb.nn.Linear4bit): + return True + return False + + +def get_parameter_device(parameter: nn.Module): + return next(parameter.parameters()).device + + +def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics): + # if it is not quantized, we quantize and offload the quantized weights and the SCB stats + if fp16_statistics is None: + set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param) + tensor_name = param_name + module = model + if "." in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + # offload weights + module._parameters[tensor_name].requires_grad = False + offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index) + if hasattr(module._parameters[tensor_name], "SCB"): + offload_weight( + module._parameters[tensor_name].SCB, + param_name.replace("weight", "SCB"), + offload_folder, + index=offload_index, + ) + else: + offload_weight(param, param_name, offload_folder, index=offload_index) + offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index) + + set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size())) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/constants.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..8c299570757cb6a5df93f4794e403d1581dd7c2e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/constants.py @@ -0,0 +1,72 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
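The quantization utilities above (`replace_with_bnb_layers`, `get_quantized_model_device_map`, `quantize_and_offload_8bit`) are normally driven through the top-level `load_and_quantize_model` entry point rather than called directly. A minimal sketch of that flow follows; `MyModel`, `my_config`, and the checkpoint folder are placeholders, not part of this diff, and the imports assume accelerate's public `utils` namespace.

```python
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

# Placeholder model class and config: any torch.nn.Module definition will do.
with init_empty_weights():
    empty_model = MyModel(my_config)  # built on the "meta" device, so no weight memory is allocated

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)

quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint_folder",  # folder holding the model weights
    device_map="auto",  # lets infer_auto_device_map place layers across the available devices
)
```

Quantizing an already materialized model is also supported, but as the warning above notes, instantiating under `init_empty_weights` and loading from `weights_location` is the recommended path.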
+ +import operator as op + + +SCALER_NAME = "scaler.pt" +MODEL_NAME = "pytorch_model" +SAFE_MODEL_NAME = "model" +RNG_STATE_NAME = "random_states" +OPTIMIZER_NAME = "optimizer" +SCHEDULER_NAME = "scheduler" +SAMPLER_NAME = "sampler" +WEIGHTS_NAME = f"{MODEL_NAME}.bin" +WEIGHTS_INDEX_NAME = f"{WEIGHTS_NAME}.index.json" +SAFE_WEIGHTS_NAME = f"{SAFE_MODEL_NAME}.safetensors" +SAFE_WEIGHTS_INDEX_NAME = f"{SAFE_WEIGHTS_NAME}.index.json" +SAGEMAKER_PYTORCH_VERSION = "1.10.2" +SAGEMAKER_PYTHON_VERSION = "py38" +SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0" +SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"] +FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"] +FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"] +FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"] +FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"] +FSDP_PYTORCH_VERSION = "2.1.0" +FSDP_MODEL_NAME = "pytorch_model_fsdp" +DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"] +TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"] + +STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} + +# These are the args for `torch.distributed.launch` for pytorch < 1.9 +TORCH_LAUNCH_PARAMS = [ + "nnodes", + "nproc_per_node", + "rdzv_backend", + "rdzv_endpoint", + "rdzv_id", + "rdzv_conf", + "standalone", + "max_restarts", + "monitor_interval", + "start_method", + "role", + "module", + "m", + "no_python", + "run_path", + "log_dir", + "r", + "redirects", + "t", + "tee", + "node_rank", + "master_addr", + "master_port", +] + +CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"] +TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + ["MULTI_NPU", "MULTI_MLU", "MULTI_XPU", "MULTI_CPU"] diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/deepspeed.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/deepspeed.py new file mode 100644 index 0000000000000000000000000000000000000000..fe5a63fc7314d42f68baae41cf56f9abc94237a0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/deepspeed.py @@ -0,0 +1,271 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import json +import os +from copy import deepcopy + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler + + +class HfDeepSpeedConfig: + """ + This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. + + A `weakref` of this object is stored in the module's globals to be able to access the config from areas where + things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). 
Therefore + it's important that this object remains alive while the program is still running. + + [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration + with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic + the DeepSpeed configuration is not modified in any way. + + Args: + config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. + + """ + + def __init__(self, config_file_or_dict): + if isinstance(config_file_or_dict, dict): + # Don't modify user's data should they want to reuse it (e.g. in tests), because once we + # modified it, it will not be accepted here again, since `auto` values would have been overridden + config = deepcopy(config_file_or_dict) + elif os.path.exists(config_file_or_dict): + with open(config_file_or_dict, encoding="utf-8") as f: + config = json.load(f) + else: + try: + config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8") + config = json.loads(config_decoded) + except (UnicodeDecodeError, AttributeError, ValueError): + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" + ) + + self.config = config + + self.set_stage_and_offload() + + def set_stage_and_offload(self): + # zero stage - this is done as early as possible, before model is created, to allow + # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object + # during ``zero.Init()`` which needs to know the dtype, and some other hparams. + self._stage = self.get_value("zero_optimization.stage", -1) + + # offload + self._offload = False + if self.is_zero2() or self.is_zero3(): + offload_devices_valid = set(["cpu", "nvme"]) + offload_devices = set( + [ + self.get_value("zero_optimization.offload_optimizer.device"), + self.get_value("zero_optimization.offload_param.device"), + ] + ) + if len(offload_devices & offload_devices_valid) > 0: + self._offload = True + + def find_config_node(self, ds_key_long): + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + ds_key = nodes.pop() + for node in nodes: + config = config.get(node) + if config is None: + return None, ds_key + + return config, ds_key + + def get_value(self, ds_key_long, default=None): + """ + Returns the set value or `default` if no value is set + """ + config, ds_key = self.find_config_node(ds_key_long) + if config is None: + return default + return config.get(ds_key, default) + + def del_config_sub_tree(self, ds_key_long, must_exist=False): + """ + Deletes a sub-section of the config file if it's found. + + Unless `must_exist` is `True` the section doesn't have to exist. + """ + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + for node in nodes: + parent_config = config + config = config.get(node) + if config is None: + if must_exist: + raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") + else: + return + + # if found remove it + if parent_config is not None: + parent_config.pop(node) + + def is_true(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set). 
+ + """ + value = self.get_value(ds_key_long) + return False if value is None else bool(value) + + def is_false(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). + """ + value = self.get_value(ds_key_long) + return False if value is None else not bool(value) + + def is_zero2(self): + return self._stage == 2 + + def is_zero3(self): + return self._stage == 3 + + def is_offload(self): + return self._offload + + +class DeepSpeedEngineWrapper: + """ + Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop. + + Args: + engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap + """ + + def __init__(self, engine): + self.engine = engine + + def backward(self, loss, **kwargs): + # runs backpropagation and handles mixed precision + self.engine.backward(loss, **kwargs) + + # Deepspeed's `engine.step` performs the following operations: + # - gradient accumulation check + # - gradient clipping + # - optimizer step + # - zero grad + # - checking overflow + # - lr_scheduler step (only if engine.lr_scheduler is not None) + self.engine.step() + # and this plugin overrides the above calls with no-ops when Accelerate runs under + # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple + # training loop that works transparently under many training regimes. + + +class DeepSpeedOptimizerWrapper(AcceleratedOptimizer): + """ + Internal wrapper around a deepspeed optimizer. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + """ + + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + self.__has_overflow__ = hasattr(self.optimizer, "overflow") + + def zero_grad(self, set_to_none=None): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + if self.__has_overflow__: + return self.optimizer.overflow + return False + + +class DeepSpeedSchedulerWrapper(AcceleratedScheduler): + """ + Internal wrapper around a deepspeed scheduler. + + Args: + scheduler (`torch.optim.lr_scheduler.LambdaLR`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + """ + + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + +class DummyOptim: + """ + Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training + loop when optimizer config is specified in the deepspeed config file. + + Args: + lr (float): + Learning rate. + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + weight_decay (float): + Weight decay. + **kwargs (additional keyword arguments, *optional*): + Other arguments. 
+ """ + + def __init__(self, params, lr=0.001, weight_decay=0, **kwargs): + self.params = params + self.lr = lr + self.weight_decay = weight_decay + self.kwargs = kwargs + + +class DummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int, *optional*): + Total number of steps. + warmup_num_steps (int, *optional*): + Number of steps for warmup. + lr_scheduler_callable (callable, *optional*): + A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`. + **kwargs (additional keyword arguments, *optional*): + Other arguments. + """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.lr_scheduler_callable = lr_scheduler_callable + self.kwargs = kwargs diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..01bb54b262b7f00b4bfb0933fc5fe94b24146097 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py @@ -0,0 +1,209 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os + +import torch + +from ..logging import get_logger +from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME +from .imports import is_torch_distributed_available +from .modeling import is_peft_model +from .versions import is_torch_version + + +if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available(): + import torch.distributed.checkpoint as dist_cp + from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner + from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType + + +logger = get_logger(__name__) + + +def _get_model_state_dict(model, adapter_only=False): + if adapter_only and is_peft_model(model): + from peft import get_peft_model_state_dict + + return get_peft_model_state_dict(model, adapter_name=model.active_adapter) + else: + return model.state_dict() + + +def _set_model_state_dict(model, state_dict, adapter_only=False): + if adapter_only and is_peft_model(model): + from peft import set_peft_model_state_dict + + return set_peft_model_state_dict(model, state_dict, adapter_name=model.active_adapter) + else: + return model.load_state_dict(state_dict) + + +def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False): + os.makedirs(output_dir, exist_ok=True) + + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT + # so, only enable it when num_processes>1 + is_multi_process = accelerator.num_processes > 1 + fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process + fsdp_plugin.state_dict_config.rank0_only = is_multi_process + + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + state_dict = _get_model_state_dict(model, adapter_only=adapter_only) + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" + output_model_file = os.path.join(output_dir, weights_name) + if accelerator.process_index == 0: + logger.info(f"Saving model to {output_model_file}") + torch.save(state_dict, output_model_file) + logger.info(f"Model saved to {output_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: + weights_name = ( + f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" + if model_index == 0 + else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" + ) + output_model_file = os.path.join(output_dir, weights_name) + logger.info(f"Saving model to {output_model_file}") + torch.save(state_dict, output_model_file) + logger.info(f"Model saved to {output_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: + ckpt_dir = os.path.join(output_dir, f"{FSDP_MODEL_NAME}_{model_index}") + os.makedirs(ckpt_dir, exist_ok=True) + logger.info(f"Saving model to {ckpt_dir}") + state_dict = {"model": state_dict} + + dist_cp.save_state_dict( + state_dict=state_dict, + storage_writer=dist_cp.FileSystemWriter(ckpt_dir), + planner=DefaultSavePlanner(), + ) + logger.info(f"Model saved to {ckpt_dir}") + + +def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, 
adapter_only=False): + accelerator.wait_for_everyone() + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT + # so, only enable it when num_processes>1 + is_multi_process = accelerator.num_processes > 1 + fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process + fsdp_plugin.state_dict_config.rank0_only = is_multi_process + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + if type(model) != FSDP and accelerator.process_index != 0: + if not fsdp_plugin.sync_module_states: + raise ValueError( + "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " + "initializing FSDP object" + ) + return + weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" + input_model_file = os.path.join(input_dir, weights_name) + logger.info(f"Loading model from {input_model_file}") + state_dict = torch.load(input_model_file) + logger.info(f"Model loaded from {input_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: + weights_name = ( + f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" + if model_index == 0 + else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" + ) + input_model_file = os.path.join(input_dir, weights_name) + logger.info(f"Loading model from {input_model_file}") + state_dict = torch.load(input_model_file) + logger.info(f"Model loaded from {input_model_file}") + elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: + ckpt_dir = ( + os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}") + if f"{FSDP_MODEL_NAME}" not in input_dir + else input_dir + ) + logger.info(f"Loading model from {ckpt_dir}") + state_dict = {"model": _get_model_state_dict(model, adapter_only=adapter_only)} + dist_cp.load_state_dict( + state_dict=state_dict, + storage_reader=dist_cp.FileSystemReader(ckpt_dir), + planner=DefaultLoadPlanner(), + ) + state_dict = state_dict["model"] + logger.info(f"Model loaded from {ckpt_dir}") + load_result = _set_model_state_dict(model, state_dict, adapter_only=adapter_only) + return load_result + + +def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0): + os.makedirs(output_dir, exist_ok=True) + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + optim_state = FSDP.optim_state_dict(model, optimizer) + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + if accelerator.process_index == 0: + optim_state_name = ( + f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" + ) + output_optimizer_file = os.path.join(output_dir, optim_state_name) + logger.info(f"Saving Optimizer state to {output_optimizer_file}") + torch.save(optim_state, output_optimizer_file) + logger.info(f"Optimizer state saved in {output_optimizer_file}") + else: + ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") + os.makedirs(ckpt_dir, exist_ok=True) + logger.info(f"Saving Optimizer state to {ckpt_dir}") + dist_cp.save_state_dict( + state_dict={"optimizer": optim_state}, + storage_writer=dist_cp.FileSystemWriter(ckpt_dir), + planner=DefaultSavePlanner(), + ) + logger.info(f"Optimizer 
state saved in {ckpt_dir}") + + +def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0, adapter_only=False): + accelerator.wait_for_everyone() + with FSDP.state_dict_type( + model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config + ): + if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: + optim_state = None + if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: + optimizer_name = ( + f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" + ) + input_optimizer_file = os.path.join(input_dir, optimizer_name) + logger.info(f"Loading Optimizer state from {input_optimizer_file}") + optim_state = torch.load(input_optimizer_file) + logger.info(f"Optimizer state loaded from {input_optimizer_file}") + else: + ckpt_dir = ( + os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") + if f"{OPTIMIZER_NAME}" not in input_dir + else input_dir + ) + logger.info(f"Loading Optimizer from {ckpt_dir}") + optim_state = load_sharded_optimizer_state_dict( + model_state_dict=_get_model_state_dict(model, adapter_only=adapter_only), + optimizer_key="optimizer", + storage_reader=dist_cp.FileSystemReader(ckpt_dir), + ) + optim_state = optim_state["optimizer"] + logger.info(f"Optimizer loaded from {ckpt_dir}") + flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state) + optimizer.load_state_dict(flattened_osd) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/memory.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..0141bf5f60430fa521de6cf196ac511a50790bb3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/memory.py @@ -0,0 +1,158 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A collection of utilities for ensuring that training can always occur. Heavily influenced by the +[toma](https://github.com/BlackHC/toma) library. +""" + +import functools +import gc +import inspect + +import torch + +from .imports import is_mlu_available, is_mps_available, is_npu_available, is_xpu_available + + +def release_memory(*objects): + """ + Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. + Returned objects should be reassigned to the same variables. 
+ + Args: + objects (`Iterable`): + An iterable of objects + Returns: + A list of `None` objects to replace `objects` + + Example: + + ```python + >>> import torch + >>> from accelerate.utils import release_memory + + >>> a = torch.ones(1000, 1000).cuda() + >>> b = torch.ones(1000, 1000).cuda() + >>> a, b = release_memory(a, b) + ``` + """ + if not isinstance(objects, list): + objects = list(objects) + for i in range(len(objects)): + objects[i] = None + gc.collect() + if is_xpu_available(): + torch.xpu.empty_cache() + elif is_mlu_available(): + torch.mlu.empty_cache() + elif is_npu_available(): + torch.npu.empty_cache() + elif is_mps_available(): + torch.mps.empty_cache() + else: + torch.cuda.empty_cache() + return objects + + +def should_reduce_batch_size(exception: Exception) -> bool: + """ + Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory + + Args: + exception (`Exception`): + An exception + """ + _statements = [ + "CUDA out of memory.", # CUDA OOM + "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU + "DefaultCPUAllocator: can't allocate memory", # CPU OOM + ] + if isinstance(exception, RuntimeError) and len(exception.args) == 1: + return any(err in exception.args[0] for err in _statements) + return False + + +def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128): + """ + A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or + CUDNN, the batch size is cut in half and passed to `function` + + `function` must take in a `batch_size` parameter as its first argument. + + Args: + function (`callable`, *optional*): + A function to wrap + starting_batch_size (`int`, *optional*): + The batch size to try and fit into memory + + Example: + + ```python + >>> from accelerate.utils import find_executable_batch_size + + + >>> @find_executable_batch_size(starting_batch_size=128) + ... def train(batch_size, model, optimizer): + ... ... + + + >>> train(model, optimizer) + ``` + """ + if function is None: + return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size) + + batch_size = starting_batch_size + + def decorator(*args, **kwargs): + nonlocal batch_size + gc.collect() + if is_xpu_available(): + torch.xpu.empty_cache() + elif is_mlu_available(): + torch.mlu.empty_cache() + elif is_npu_available(): + torch.npu.empty_cache() + else: + torch.cuda.empty_cache() + params = list(inspect.signature(function).parameters.keys()) + # Guard against user error + if len(params) < (len(args) + 1): + arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])]) + raise TypeError( + f"Batch size was passed into `{function.__name__}` as the first argument when called." 
+ f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" + ) + while True: + if batch_size == 0: + raise RuntimeError("No executable batch size found, reached zero.") + try: + return function(batch_size, *args, **kwargs) + except Exception as e: + if should_reduce_batch_size(e): + gc.collect() + if is_xpu_available(): + torch.xpu.empty_cache() + elif is_mlu_available(): + torch.mlu.empty_cache() + elif is_npu_available(): + torch.npu.empty_cache() + else: + torch.cuda.empty_cache() + batch_size //= 2 + else: + raise + + return decorator diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/other.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/other.py new file mode 100644 index 0000000000000000000000000000000000000000..a313d08685be25707109c4973b346cdb0a4af90b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/other.py @@ -0,0 +1,366 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import os +import platform +import re +import socket +from contextlib import contextmanager +from functools import partial, reduce +from types import MethodType +from typing import OrderedDict + +import torch +from packaging.version import Version +from safetensors.torch import save_file as safe_save_file + +from ..commands.config.default import write_basic_config # noqa: F401 +from ..logging import get_logger +from ..state import PartialState +from .constants import FSDP_PYTORCH_VERSION +from .dataclasses import DistributedType +from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available +from .modeling import id_tensor_storage +from .transformer_engine import convert_model +from .versions import is_torch_version + + +logger = get_logger(__name__) + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + +def is_compiled_module(module): + """ + Check whether the module was compiled with torch.compile() + """ + if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): + return False + return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) + + +def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True, recursive: bool = False): + """ + Extract a model from its distributed containers. + + Args: + model (`torch.nn.Module`): + The model to extract. + keep_fp32_wrapper (`bool`, *optional*): + Whether to remove mixed precision hooks from the model. + recursive (`bool`, *optional*, defaults to `False`): + Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers + recursively, not just the top-level distributed containers. + + Returns: + `torch.nn.Module`: The extracted model. 
+ """ + options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) + + is_compiled = is_compiled_module(model) + if is_compiled: + compiled_model = model + model = model._orig_mod + + if is_deepspeed_available(): + from deepspeed import DeepSpeedEngine + + options += (DeepSpeedEngine,) + + if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available(): + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + options += (FSDP,) + + while isinstance(model, options): + model = model.module + + if recursive: + # This is needed in cases such as using FSDPv2 on XLA + def _recursive_unwrap(module): + # Wrapped modules are standardly wrapped as `module`, similar to the cases earlier + # with DDP, DataParallel, DeepSpeed, and FSDP + if hasattr(module, "module"): + unwrapped_module = _recursive_unwrap(module.module) + else: + unwrapped_module = module + # Next unwrap child sublayers recursively + for name, child in unwrapped_module.named_children(): + setattr(unwrapped_module, name, _recursive_unwrap(child)) + return unwrapped_module + + # Start with top-level + model = _recursive_unwrap(model) + + if not keep_fp32_wrapper: + forward = model.forward + original_forward = model.__dict__.pop("_original_forward", None) + if original_forward is not None: + while hasattr(forward, "__wrapped__"): + forward = forward.__wrapped__ + if forward == original_forward: + break + model.forward = MethodType(forward, model) + if getattr(model, "_converted_to_transformer_engine", False): + convert_model(model, to_transformer_engine=False) + + if is_compiled: + compiled_model._orig_mod = model + model = compiled_model + + return model + + +def wait_for_everyone(): + """ + Introduces a blocking point in the script, making sure all processes have reached this point before continuing. + + + + Make sure all processes will reach this instruction otherwise one of your processes will hang forever. + + + """ + PartialState().wait_for_everyone() + + +def clean_state_dict_for_safetensors(state_dict: dict): + """ + Cleans the state dictionary from a model and removes tensor aliasing if present. + + Args: + state_dict (`dict`): + The state dictionary from a model + """ + ptrs = collections.defaultdict(list) + # When bnb serialization is used, weights in state dict can be strings + for name, tensor in state_dict.items(): + if not isinstance(tensor, str): + ptrs[id_tensor_storage(tensor)].append(name) + + # These are all pointers of tensors with shared memory + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + warn_names = set() + for names in shared_ptrs.values(): + # When not all duplicates have been cleaned, we still remove those keys but put a clear warning. + # If the link between tensors was done at runtime then `from_pretrained` will not get + # the key back leading to random tensor. A proper warning will be shown + # during reload (if applicable), but since the file is not necessarily compatible with + # the config, better show a proper warning. + found_names = [name for name in names if name in state_dict] + warn_names.update(found_names[1:]) + for name in found_names[1:]: + del state_dict[name] + if len(warn_names) > 0: + logger.warning( + f"Removed shared tensor {warn_names} while saving. 
This should be OK, but check by verifying that you don't receive any warning while reloading", + ) + state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()} + return state_dict + + +def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False): + """ + Save the data to disk. Use in place of `torch.save()`. + + Args: + obj: + The data to save + f: + The file (or file-like object) to use to save the data + save_on_each_node (`bool`, *optional*, defaults to `False`): + Whether to only save on the global main process + safe_serialization (`bool`, *optional*, defaults to `False`): + Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`). + """ + # When TorchXLA is enabled, it's necessary to transfer all data to the CPU before saving. + # Another issue arises with `id_tensor_storage`, which treats all XLA tensors as identical. + # If tensors remain on XLA, calling `clean_state_dict_for_safetensors` will result in only + # one XLA tensor remaining. + if PartialState().distributed_type == DistributedType.XLA: + obj = xm._maybe_convert_to_cpu(obj) + # Check if it's a model and remove duplicates + if safe_serialization: + save_func = partial(safe_save_file, metadata={"format": "pt"}) + if isinstance(obj, OrderedDict): + obj = clean_state_dict_for_safetensors(obj) + else: + save_func = torch.save + + if PartialState().is_main_process and not save_on_each_node: + save_func(obj, f) + elif PartialState().is_local_main_process and save_on_each_node: + save_func(obj, f) + + +@contextmanager +def clear_environment(): + """ + A context manager that will temporarily clear environment variables. + + When this context exits, the previous environment variables will be back. + + Example: + + ```python + >>> import os + >>> from accelerate.utils import clear_environment + + >>> os.environ["FOO"] = "bar" + >>> with clear_environment(): + ... print(os.environ) + ... os.environ["FOO"] = "new_bar" + ... print(os.environ["FOO"]) + {} + new_bar + + >>> print(os.environ["FOO"]) + bar + ``` + """ + _old_os_environ = os.environ.copy() + os.environ.clear() + + try: + yield + finally: + os.environ.clear() # clear any added keys, + os.environ.update(_old_os_environ) # then restore previous environment + + +@contextmanager +def patch_environment(**kwargs): + """ + A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. + + Will convert the values in `kwargs` to strings and upper-case all the keys. + + Example: + + ```python + >>> import os + >>> from accelerate.utils import patch_environment + + >>> with patch_environment(FOO="bar"): + ... print(os.environ["FOO"]) # prints "bar" + >>> print(os.environ["FOO"]) # raises KeyError + ``` + """ + existing_vars = {} + for key, value in kwargs.items(): + key = key.upper() + if key in os.environ: + existing_vars[key] = os.environ[key] + os.environ[key] = str(value) + + try: + yield + finally: + for key in kwargs: + key = key.upper() + if key in existing_vars: + # restore previous value + os.environ[key] = existing_vars[key] + else: + os.environ.pop(key, None) + + +def get_pretty_name(obj): + """ + Gets a pretty name from `obj`. 
+ """ + if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"): + obj = getattr(obj, "__class__", obj) + if hasattr(obj, "__qualname__"): + return obj.__qualname__ + if hasattr(obj, "__name__"): + return obj.__name__ + return str(obj) + + +def merge_dicts(source, destination): + """ + Recursively merges two dictionaries. + + Args: + source (`dict`): The dictionary to merge into `destination`. + destination (`dict`): The dictionary to merge `source` into. + """ + for key, value in source.items(): + if isinstance(value, dict): + node = destination.setdefault(key, {}) + merge_dicts(value, node) + else: + destination[key] = value + + return destination + + +def is_port_in_use(port: int = None) -> bool: + """ + Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been + run and need to see if the port is already in use. + """ + if port is None: + port = 29500 + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + return s.connect_ex(("localhost", port)) == 0 + + +def convert_bytes(size): + "Converts `size` from bytes to the largest possible unit" + for x in ["bytes", "KB", "MB", "GB", "TB"]: + if size < 1024.0: + return f"{round(size, 2)} {x}" + size /= 1024.0 + + return f"{round(size, 2)} PB" + + +def check_os_kernel(): + """Warns if the kernel version is below the recommended minimum on Linux.""" + # see issue #1929 + info = platform.uname() + system = info.system + if system != "Linux": + return + + _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release) + min_version = "5.5.0" + if Version(version) < Version(min_version): + msg = ( + f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can " + "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher." + ) + logger.warning(msg, main_process_only=True) + + +def recursive_getattr(obj, attr: str): + """ + Recursive `getattr`. + + Args: + obj: + A class instance holding the attribute. + attr (`str`): + The attribute that is to be retrieved, e.g. 'attribute1.attribute2'. + """ + + def _getattr(obj, attr): + return getattr(obj, attr) + + return reduce(_getattr, [obj] + attr.split(".")) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/torch_xla.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/torch_xla.py new file mode 100644 index 0000000000000000000000000000000000000000..140133926c2f88d39c70f5a9f46a08f88bed36da --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/torch_xla.py @@ -0,0 +1,51 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib.metadata +import subprocess +import sys + + +def install_xla(upgrade: bool = False): + """ + Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory. 
+ + Args: + upgrade (`bool`, *optional*, defaults to `False`): + Whether to upgrade `torch` and install the latest `torch_xla` wheels. + + Example: + + ```python + >>> from accelerate.utils import install_xla + + >>> install_xla(upgrade=True) + ``` + """ + in_colab = False + if "IPython" in sys.modules: + in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) + + if in_colab: + if upgrade: + torch_install_cmd = ["pip", "install", "-U", "torch"] + subprocess.run(torch_install_cmd, check=True) + # get the current version of torch + torch_version = importlib.metadata.version("torch") + torch_version_trunc = torch_version[: torch_version.rindex(".")] + xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl" + xla_install_cmd = ["pip", "install", xla_wheel] + subprocess.run(xla_install_cmd, check=True) + else: + raise RuntimeError("`install_xla` utility works only on google colab.") diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/tqdm.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..1771366c84d5a1f5d07489d19de77a17c97dbd89 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/tqdm.py @@ -0,0 +1,47 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from .imports import is_tqdm_available + + +if is_tqdm_available(): + from tqdm.auto import tqdm as _tqdm + +from ..state import PartialState + + +def tqdm(*args, main_process_only: bool = True, **kwargs): + """ + Wrapper around `tqdm.tqdm` that optionally displays only on the main process. + + Args: + main_process_only (`bool`, *optional*): + Whether to display the progress bar only on the main process + """ + if not is_tqdm_available(): + raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.") + if len(args) > 0 and isinstance(args[0], bool): + warnings.warn( + f"Passing `{args[0]}` as the first argument to Accelerate's `tqdm` wrapper is deprecated " + "and will be removed in v0.33.0. Please use the `main_process_only` keyword argument instead.", + FutureWarning, + ) + main_process_only = args[0] + args = args[1:] + disable = kwargs.pop("disable", False) + if main_process_only and not disable: + disable = PartialState().local_process_index != 0 + return _tqdm(*args, **kwargs, disable=disable) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..211a9f553ca22ac4938969416d07a9b139918b60 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py @@ -0,0 +1,84 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch.nn as nn + +from .imports import is_fp8_available + + +if is_fp8_available(): + import transformer_engine.pytorch as te + + +def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True): + """ + Recursively converts the linear and layernorm layers of a model to their `transformers_engine` counterpart. + """ + if not is_fp8_available(): + raise ImportError("Using `convert_model` requires transformer_engine to be installed.") + for name, module in model.named_children(): + if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear: + # Return early if the linear layer weights are not multiples of 16 + if any(p % 16 != 0 for p in module.weight.shape): + return + has_bias = module.bias is not None + te_module = te.Linear( + module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype + ) + te_module.weight.copy_(module.weight) + if has_bias: + te_module.bias.copy_(module.bias) + + setattr(model, name, te_module) + elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln: + te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) + te_module.weight.copy_(module.weight) + te_module.bias.copy_(module.bias) + + setattr(model, name, te_module) + elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear: + has_bias = module.bias is not None + new_module = nn.Linear( + module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype + ) + new_module.weight.copy_(module.weight) + if has_bias: + new_module.bias.copy_(module.bias) + + setattr(model, name, new_module) + elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln: + new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) + new_module.weight.copy_(module.weight) + new_module.bias.copy_(module.bias) + + setattr(model, name, new_module) + else: + convert_model( + module, + to_transformer_engine=to_transformer_engine, + _convert_linear=_convert_linear, + _convert_ln=_convert_ln, + ) + + +def has_transformer_engine_layers(model): + """ + Returns whether a given model has some `transformer_engine` layer or not. + """ + if not is_fp8_available(): + raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.") + for m in model.modules(): + if isinstance(m, (te.LayerNorm, te.Linear, te.TransformerLayer)): + return True + return False diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/utils/versions.py b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..985c918f0e057bacc70c372f6906071bb73db577 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/utils/versions.py @@ -0,0 +1,56 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib.metadata +from typing import Union + +from packaging.version import Version, parse + +from .constants import STR_OPERATION_TO_FUNC + + +torch_version = parse(importlib.metadata.version("torch")) + + +def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): + """ + Compares a library version to some requirement using a given operation. + + Args: + library_or_version (`str` or `packaging.version.Version`): + A library name or a version to check. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="`. + requirement_version (`str`): + The version to compare the library version against + """ + if operation not in STR_OPERATION_TO_FUNC.keys(): + raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") + operation = STR_OPERATION_TO_FUNC[operation] + if isinstance(library_or_version, str): + library_or_version = parse(importlib.metadata.version(library_or_version)) + return operation(library_or_version, parse(requirement_version)) + + +def is_torch_version(operation: str, version: str): + """ + Compares the current PyTorch version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of PyTorch + """ + return compare_versions(torch_version, operation, version)
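Finally, a short usage sketch for these version helpers; `numpy` below is only an example of an installed library to compare against, not a dependency of this module:

```python
from accelerate.utils.versions import compare_versions, is_torch_version

# Compare the installed PyTorch build against a requirement string.
if is_torch_version(">=", "2.1.0"):
    print("torch satisfies the FSDP minimum pinned in constants.py")

# Compare any installed library (resolved via importlib.metadata) against a version.
if compare_versions("numpy", ">=", "1.20.0"):
    print("numpy is at least 1.20.0")
```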