applied-ai-018 committed

Commit a47af9b · verified · 1 parent: 991d55a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. ckpts/universal/global_step40/zero/14.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/14.attention.dense.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/14.attention.dense.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/exp_avg.pt +3 -0
  6. venv/lib/python3.10/site-packages/accelerate/commands/config/__init__.py +52 -0
  7. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/accelerate/commands/config/cluster.py +705 -0
  16. venv/lib/python3.10/site-packages/accelerate/commands/config/config.py +89 -0
  17. venv/lib/python3.10/site-packages/accelerate/commands/config/config_args.py +243 -0
  18. venv/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py +101 -0
  19. venv/lib/python3.10/site-packages/accelerate/commands/config/default.py +133 -0
  20. venv/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py +267 -0
  21. venv/lib/python3.10/site-packages/accelerate/commands/config/update.py +63 -0
  22. venv/lib/python3.10/site-packages/accelerate/test_utils/__init__.py +50 -0
  23. venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/accelerate/test_utils/examples.py +146 -0
  28. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py +13 -0
  29. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py +13 -0
  37. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py +268 -0
  45. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py +306 -0
  46. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py +282 -0
  47. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py +243 -0
  48. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py +129 -0
  49. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py +52 -0
  50. venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py +26 -0
ckpts/universal/global_step40/zero/14.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02802b1d7f7e3e8e3a211c7169fccc29b0c5ab71d9c178ef9186358256014ae6
+ size 16778396
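
The checkpoint entries in this commit are Git LFS pointer stubs rather than the tensors themselves: each committed `.pt` file holds only the spec version, the sha256 object id, and the byte size, while the actual optimizer-state blob lives in LFS storage. A minimal sketch of reading such a pointer; the helper name is illustrative and the path is one of the files added above:

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into its key/value fields (version, oid, size)."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("ckpts/universal/global_step40/zero/14.attention.dense.weight/exp_avg.pt")
print(info["oid"], int(info["size"]))  # e.g. sha256:02802b1d... 16778396
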
ckpts/universal/global_step40/zero/14.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c22050463f188311e0f48919d64be9c774ef4e3fe86f9878025f734fe38e83ab
+ size 16778411
ckpts/universal/global_step40/zero/14.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcc344e77e04b92d1bbd4ebd54e03c06edcbb964e1a62dc6f58225ce0e3f39ee
+ size 16778317
ckpts/universal/global_step40/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6185adf346244d4cb5de77d168ae74aed16f8d046c739d0256d69ee4337dac3f
+ size 33555627
ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdddc3d8ef6b0f345abb8801d0d5af1a315077892052a6d98b34c09979624f5b
+ size 9372
venv/lib/python3.10/site-packages/accelerate/commands/config/__init__.py ADDED
@@ -0,0 +1,52 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+
+ from .config import config_command_parser
+ from .config_args import default_config_file, load_config_from_file  # noqa: F401
+ from .default import default_command_parser
+ from .update import update_command_parser
+
+
+ def get_config_parser(subparsers=None):
+     parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
+     # The main config parser
+     config_parser = config_command_parser(subparsers)
+     # The subparser to add commands to
+     subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
+
+     # Then add other parsers with the parent parser
+     default_command_parser(subcommands, parents=[parent_parser])
+     update_command_parser(subcommands, parents=[parent_parser])
+
+     return config_parser
+
+
+ def main():
+     config_parser = get_config_parser()
+     args = config_parser.parse_args()
+
+     if not hasattr(args, "func"):
+         config_parser.print_help()
+         exit(1)
+
+     # Run
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
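
The `get_config_parser` / `main` pair above backs the `accelerate config` CLI entry point. A minimal sketch of driving it programmatically, assuming `accelerate` is installed in the current environment:

from accelerate.commands.config import get_config_parser

parser = get_config_parser()
# Equivalent to running `accelerate config --help`; the `default` and `update`
# subcommands registered above appear in this help text.
parser.print_help()
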
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.09 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc ADDED
Binary file (14.9 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.44 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc ADDED
Binary file (7.14 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc ADDED
Binary file (2.76 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc ADDED
Binary file (3.92 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc ADDED
Binary file (6.87 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc ADDED
Binary file (1.86 kB)
venv/lib/python3.10/site-packages/accelerate/commands/config/cluster.py ADDED
@@ -0,0 +1,705 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import os
18
+
19
+ from ...utils import (
20
+ ComputeEnvironment,
21
+ DistributedType,
22
+ is_deepspeed_available,
23
+ is_mlu_available,
24
+ is_mps_available,
25
+ is_npu_available,
26
+ is_transformers_available,
27
+ is_xpu_available,
28
+ )
29
+ from ...utils.constants import (
30
+ DEEPSPEED_MULTINODE_LAUNCHERS,
31
+ FSDP_AUTO_WRAP_POLICY,
32
+ FSDP_BACKWARD_PREFETCH,
33
+ FSDP_SHARDING_STRATEGY,
34
+ FSDP_STATE_DICT_TYPE,
35
+ TORCH_DYNAMO_MODES,
36
+ )
37
+ from .config_args import ClusterConfig
38
+ from .config_utils import (
39
+ DYNAMO_BACKENDS,
40
+ _ask_field,
41
+ _ask_options,
42
+ _convert_distributed_mode,
43
+ _convert_dynamo_backend,
44
+ _convert_mixed_precision,
45
+ _convert_yes_no_to_bool,
46
+ )
47
+
48
+
49
+ def get_cluster_input():
50
+ distributed_type = _ask_options(
51
+ "Which type of machine are you using?",
52
+ ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "TPU"],
53
+ _convert_distributed_mode,
54
+ )
55
+
56
+ machine_rank = 0
57
+ num_machines = 1
58
+ num_processes = 1
59
+ gpu_ids = None
60
+ main_process_ip = None
61
+ main_process_port = None
62
+ rdzv_backend = "static"
63
+ same_network = True
64
+ debug = False
65
+
66
+ if distributed_type in [
67
+ DistributedType.MULTI_GPU,
68
+ DistributedType.MULTI_MLU,
69
+ DistributedType.MULTI_NPU,
70
+ DistributedType.MULTI_XPU,
71
+ DistributedType.MULTI_CPU,
72
+ ]:
73
+ num_machines = _ask_field(
74
+ "How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
75
+ int,
76
+ default=1,
77
+ )
78
+ if num_machines > 1:
79
+ machine_rank = _ask_options(
80
+ "What is the rank of this machine?",
81
+ list(range(num_machines)),
82
+ int,
83
+ )
84
+ main_process_ip = _ask_field(
85
+ "What is the IP address of the machine that will host the main process? ",
86
+ )
87
+ main_process_port = _ask_field(
88
+ "What is the port you will use to communicate with the main process? ",
89
+ int,
90
+ )
91
+ same_network = _ask_field(
92
+ "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ",
93
+ _convert_yes_no_to_bool,
94
+ default=True,
95
+ error_message="Please enter yes or no.",
96
+ )
97
+ if not same_network:
98
+ rdzv_backend = _ask_field(
99
+ "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static"
100
+ )
101
+ debug = _ask_field(
102
+ "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
103
+ _convert_yes_no_to_bool,
104
+ default=False,
105
+ error_message="Please enter yes or no.",
106
+ )
107
+
108
+ if distributed_type == DistributedType.NO:
109
+ use_cpu = _ask_field(
110
+ "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
111
+ _convert_yes_no_to_bool,
112
+ default=False,
113
+ error_message="Please enter yes or no.",
114
+ )
115
+ elif distributed_type == DistributedType.MULTI_CPU:
116
+ use_cpu = True
117
+ else:
118
+ use_cpu = False
119
+
120
+ ipex_config = {}
121
+ mpirun_config = {}
122
+ if use_cpu:
123
+ ipex_config["ipex"] = _ask_field(
124
+ "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
125
+ _convert_yes_no_to_bool,
126
+ default=False,
127
+ error_message="Please enter yes or no.",
128
+ )
129
+ if distributed_type == DistributedType.MULTI_CPU:
130
+ use_mpirun = _ask_field(
131
+ "Do you want accelerate to launch mpirun? [yes/NO]: ",
132
+ _convert_yes_no_to_bool,
133
+ default=False,
134
+ error_message="Please enter yes or no.",
135
+ )
136
+ if use_mpirun:
137
+ mpirun_hostfile = _ask_field(
138
+ "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
139
+ str,
140
+ default="~/hostfile",
141
+ )
142
+ mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
143
+ mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
144
+ if (
145
+ not use_cpu
146
+ and is_xpu_available()
147
+ and distributed_type
148
+ not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA]
149
+ ):
150
+ ipex_config["use_xpu"] = _ask_field(
151
+ "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
152
+ _convert_yes_no_to_bool,
153
+ default=False,
154
+ error_message="Please enter yes or no.",
155
+ )
156
+
157
+ dynamo_config = {}
158
+ use_dynamo = _ask_field(
159
+ "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
160
+ _convert_yes_no_to_bool,
161
+ default=False,
162
+ error_message="Please enter yes or no.",
163
+ )
164
+ if use_dynamo:
165
+ prefix = "dynamo_"
166
+ dynamo_config[prefix + "backend"] = _ask_options(
167
+ "Which dynamo backend would you like to use?",
168
+ [x.lower() for x in DYNAMO_BACKENDS],
169
+ _convert_dynamo_backend,
170
+ default=2,
171
+ )
172
+ use_custom_options = _ask_field(
173
+ "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
174
+ _convert_yes_no_to_bool,
175
+ default=False,
176
+ error_message="Please enter yes or no.",
177
+ )
178
+
179
+ if use_custom_options:
180
+ dynamo_config[prefix + "mode"] = _ask_options(
181
+ "Which mode do you want to use?",
182
+ TORCH_DYNAMO_MODES,
183
+ lambda x: TORCH_DYNAMO_MODES[int(x)],
184
+ default=0,
185
+ )
186
+ dynamo_config[prefix + "use_fullgraph"] = _ask_field(
187
+ "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
188
+ _convert_yes_no_to_bool,
189
+ default=False,
190
+ error_message="Please enter yes or no.",
191
+ )
192
+ dynamo_config[prefix + "use_dynamic"] = _ask_field(
193
+ "Do you want to enable dynamic shape tracing? [yes/NO]: ",
194
+ _convert_yes_no_to_bool,
195
+ default=False,
196
+ error_message="Please enter yes or no.",
197
+ )
198
+
199
+ use_mps = not use_cpu and is_mps_available()
200
+ deepspeed_config = {}
201
+ if (
202
+ distributed_type
203
+ in [
204
+ DistributedType.MULTI_GPU,
205
+ DistributedType.MULTI_XPU,
206
+ DistributedType.MULTI_NPU,
207
+ DistributedType.MULTI_MLU,
208
+ DistributedType.NO,
209
+ ]
210
+ and not use_mps
211
+ ):
212
+ use_deepspeed = _ask_field(
213
+ "Do you want to use DeepSpeed? [yes/NO]: ",
214
+ _convert_yes_no_to_bool,
215
+ default=False,
216
+ error_message="Please enter yes or no.",
217
+ )
218
+ if use_deepspeed:
219
+ distributed_type = DistributedType.DEEPSPEED
220
+ assert (
221
+ is_deepspeed_available()
222
+ ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
223
+
224
+ if distributed_type == DistributedType.DEEPSPEED:
225
+ use_deepspeed_config = _ask_field(
226
+ "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
227
+ _convert_yes_no_to_bool,
228
+ default=False,
229
+ error_message="Please enter yes or no.",
230
+ )
231
+ if use_deepspeed_config:
232
+ deepspeed_config["deepspeed_config_file"] = _ask_field(
233
+ "Please enter the path to the json DeepSpeed config file: ",
234
+ str,
235
+ default="none",
236
+ )
237
+ else:
238
+ deepspeed_config["zero_stage"] = _ask_options(
239
+ "What should be your DeepSpeed's ZeRO optimization stage?",
240
+ [0, 1, 2, 3],
241
+ int,
242
+ default=2,
243
+ )
244
+
245
+ deepspeed_devices = ["none", "cpu", "nvme"]
246
+ if deepspeed_config["zero_stage"] >= 2:
247
+ deepspeed_config["offload_optimizer_device"] = _ask_options(
248
+ "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
249
+ )
250
+ deepspeed_config["offload_param_device"] = _ask_options(
251
+ "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
252
+ )
253
+ if deepspeed_config["offload_param_device"] == "nvme":
254
+ deepspeed_config["offload_param_nvme_path"] = _ask_field(
255
+ "Nvme Path to offload parameters?",
256
+ str,
257
+ default="/nvme",
258
+ )
259
+ if deepspeed_config["offload_optimizer_device"] == "nvme":
260
+ deepspeed_config["offload_optimizer_nvme_path"] = _ask_field(
261
+ "Nvme Path to offload optimizer states?",
262
+ str,
263
+ default="/nvme",
264
+ )
265
+ deepspeed_config["gradient_accumulation_steps"] = _ask_field(
266
+ "How many gradient accumulation steps you're passing in your script? [1]: ",
267
+ int,
268
+ default=1,
269
+ )
270
+ use_gradient_clipping = _ask_field(
271
+ "Do you want to use gradient clipping? [yes/NO]: ",
272
+ _convert_yes_no_to_bool,
273
+ default=False,
274
+ error_message="Please enter yes or no.",
275
+ )
276
+ if use_gradient_clipping:
277
+ deepspeed_config["gradient_clipping"] = _ask_field(
278
+ "What is the gradient clipping value? [1.0]: ",
279
+ float,
280
+ default=1.0,
281
+ )
282
+ if deepspeed_config["zero_stage"] == 3:
283
+ deepspeed_config["zero3_save_16bit_model"] = _ask_field(
284
+ "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ",
285
+ _convert_yes_no_to_bool,
286
+ default=False,
287
+ error_message="Please enter yes or no.",
288
+ )
289
+ deepspeed_config["zero3_init_flag"] = _ask_field(
290
+ "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ",
291
+ _convert_yes_no_to_bool,
292
+ default=False,
293
+ error_message="Please enter yes or no.",
294
+ )
295
+ if deepspeed_config["zero3_init_flag"]:
296
+ if not is_transformers_available():
297
+ raise Exception(
298
+ "When `zero3_init_flag` is set, it requires Transformers to be installed. "
299
+ "Please run `pip3 install transformers`."
300
+ )
301
+
302
+ if num_machines > 1:
303
+ launcher_query = "Which Type of launcher do you want to use?"
304
+ deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
305
+ launcher_query,
306
+ DEEPSPEED_MULTINODE_LAUNCHERS,
307
+ lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
308
+ )
309
+
310
+ if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
311
+ deepspeed_config["deepspeed_hostfile"] = _ask_field(
312
+ "DeepSpeed configures multi-node compute resources with hostfile. "
313
+ "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; "
314
+ "for more information please refer official [documentation]"
315
+ "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). "
316
+ "Please specify the location of hostfile: ",
317
+ str,
318
+ )
319
+
320
+ is_exclusion_filter = _ask_field(
321
+ "Do you want to specify exclusion filter string? [yes/NO]: ",
322
+ _convert_yes_no_to_bool,
323
+ default=False,
324
+ error_message="Please enter yes or no.",
325
+ )
326
+ if is_exclusion_filter:
327
+ deepspeed_config["deepspeed_exclusion_filter"] = _ask_field(
328
+ "DeepSpeed exclusion filter string: ",
329
+ str,
330
+ )
331
+
332
+ is_inclusion_filter = _ask_field(
333
+ "Do you want to specify inclusion filter string? [yes/NO]: ",
334
+ _convert_yes_no_to_bool,
335
+ default=False,
336
+ error_message="Please enter yes or no.",
337
+ )
338
+ if is_inclusion_filter:
339
+ deepspeed_config["deepspeed_inclusion_filter"] = _ask_field(
340
+ "DeepSpeed inclusion filter string: ",
341
+ str,
342
+ )
343
+
344
+ fsdp_config = {}
345
+ if distributed_type in [
346
+ DistributedType.MULTI_GPU,
347
+ DistributedType.MULTI_NPU,
348
+ DistributedType.MULTI_MLU,
349
+ DistributedType.MULTI_XPU,
350
+ ]:
351
+ use_fsdp = _ask_field(
352
+ "Do you want to use FullyShardedDataParallel? [yes/NO]: ",
353
+ _convert_yes_no_to_bool,
354
+ default=False,
355
+ error_message="Please enter yes or no.",
356
+ )
357
+ if use_fsdp:
358
+ distributed_type = DistributedType.FSDP
359
+ if distributed_type == DistributedType.FSDP:
360
+ sharding_strategy_query = "What should be your sharding strategy?"
361
+ fsdp_config["fsdp_sharding_strategy"] = _ask_options(
362
+ sharding_strategy_query,
363
+ FSDP_SHARDING_STRATEGY,
364
+ lambda x: FSDP_SHARDING_STRATEGY[int(x)],
365
+ )
366
+ fsdp_config["fsdp_offload_params"] = _ask_field(
367
+ "Do you want to offload parameters and gradients to CPU? [yes/NO]: ",
368
+ _convert_yes_no_to_bool,
369
+ default=False,
370
+ error_message="Please enter yes or no.",
371
+ )
372
+ fsdp_wrap_query = "What should be your auto wrap policy?"
373
+ fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
374
+ fsdp_wrap_query,
375
+ FSDP_AUTO_WRAP_POLICY,
376
+ lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],
377
+ )
378
+ if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]:
379
+ use_no_split_modules = _ask_field(
380
+ "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ",
381
+ _convert_yes_no_to_bool,
382
+ default=False,
383
+ error_message="Please enter yes or no.",
384
+ )
385
+ if not use_no_split_modules:
386
+ fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field(
387
+ "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :"
388
+ "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ",
389
+ str,
390
+ )
391
+ elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]:
392
+ fsdp_config["fsdp_min_num_params"] = _ask_field(
393
+ "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ",
394
+ int,
395
+ default=100000000,
396
+ )
397
+ fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
398
+ fsdp_config["fsdp_backward_prefetch"] = _ask_options(
399
+ fsdp_backward_prefetch_query,
400
+ FSDP_BACKWARD_PREFETCH,
401
+ lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
402
+ )
403
+ fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
404
+ fsdp_config["fsdp_state_dict_type"] = _ask_options(
405
+ fsdp_state_dict_type_query,
406
+ FSDP_STATE_DICT_TYPE,
407
+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],
408
+ default=2,
409
+ )
410
+ fsdp_config["fsdp_forward_prefetch"] = _ask_field(
411
+ "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
412
+ _convert_yes_no_to_bool,
413
+ default=False,
414
+ error_message="Please enter yes or no.",
415
+ )
416
+ fsdp_config["fsdp_use_orig_params"] = _ask_field(
417
+ "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
418
+ _convert_yes_no_to_bool,
419
+ default=True,
420
+ error_message="Please enter yes or no.",
421
+ )
422
+ fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
423
+ "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
424
+ _convert_yes_no_to_bool,
425
+ default=True,
426
+ error_message="Please enter yes or no.",
427
+ )
428
+ if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
429
+ fsdp_config["fsdp_sync_module_states"] = True
430
+ else:
431
+ fsdp_config["fsdp_sync_module_states"] = _ask_field(
432
+ "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
433
+ _convert_yes_no_to_bool,
434
+ default=True,
435
+ error_message="Please enter yes or no.",
436
+ )
437
+
438
+ megatron_lm_config = {}
439
+ if distributed_type in [DistributedType.MULTI_GPU]:
440
+ use_megatron_lm = _ask_field(
441
+ "Do you want to use Megatron-LM ? [yes/NO]: ",
442
+ _convert_yes_no_to_bool,
443
+ default=False,
444
+ error_message="Please enter yes or no.",
445
+ )
446
+ if use_megatron_lm:
447
+ distributed_type = DistributedType.MEGATRON_LM
448
+ if distributed_type == DistributedType.MEGATRON_LM:
449
+ prefix = "megatron_lm_"
450
+ megatron_lm_config[prefix + "tp_degree"] = _ask_field(
451
+ "What is the Tensor Parallelism degree/size? [1]:",
452
+ int,
453
+ default=1,
454
+ error_message="Please enter an integer.",
455
+ )
456
+ if megatron_lm_config[prefix + "tp_degree"] > 1:
457
+ megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field(
458
+ "Do you want to enable Sequence Parallelism? [YES/no]: ",
459
+ _convert_yes_no_to_bool,
460
+ default=True,
461
+ error_message="Please enter yes or no.",
462
+ )
463
+
464
+ megatron_lm_config[prefix + "pp_degree"] = _ask_field(
465
+ "What is the Pipeline Parallelism degree/size? [1]:",
466
+ int,
467
+ default=1,
468
+ error_message="Please enter an integer.",
469
+ )
470
+ if megatron_lm_config[prefix + "pp_degree"] > 1:
471
+ megatron_lm_config[prefix + "num_micro_batches"] = _ask_field(
472
+ "What is the number of micro-batches? [1]:",
473
+ int,
474
+ default=1,
475
+ error_message="Please enter an integer.",
476
+ )
477
+
478
+ megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
479
+ "Do you want to enable selective activation recomputation? [YES/no]: ",
480
+ _convert_yes_no_to_bool,
481
+ default=True,
482
+ error_message="Please enter yes or no.",
483
+ )
484
+
485
+ megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
486
+ "Do you want to use distributed optimizer "
487
+ "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
488
+ _convert_yes_no_to_bool,
489
+ default=True,
490
+ error_message="Please enter yes or no.",
491
+ )
492
+
493
+ megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
494
+ "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
495
+ float,
496
+ default=1.0,
497
+ )
498
+ # TPU specific defaults
499
+ tpu_commands = None
500
+ tpu_command_file = None
501
+ tpu_downcast_bf16 = "no"
502
+ tpu_env = []
503
+ tpu_name = None
504
+ tpu_vm = None
505
+ tpu_zone = None
506
+ tpu_use_sudo = False
507
+ tpu_use_cluster = False
508
+
509
+ if distributed_type in [
510
+ DistributedType.MULTI_CPU,
511
+ DistributedType.MULTI_XPU,
512
+ DistributedType.MULTI_GPU,
513
+ DistributedType.MULTI_MLU,
514
+ DistributedType.MULTI_NPU,
515
+ DistributedType.XLA,
516
+ ]:
517
+ machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
518
+ if machine_type == "TPU":
519
+ machine_type += " cores"
520
+ elif machine_type == "CPU":
521
+ machine_type = "processes"
522
+ else:
523
+ machine_type += "(s)"
524
+ num_processes = _ask_field(
525
+ f"How many {machine_type} should be used for distributed training? [1]:",
526
+ int,
527
+ default=1,
528
+ error_message="Please enter an integer.",
529
+ )
530
+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
531
+ num_processes = _ask_field(
532
+ "How many GPU(s) should be used for distributed training? [1]:",
533
+ int,
534
+ default=1,
535
+ error_message="Please enter an integer.",
536
+ )
537
+ else:
538
+ num_processes = 1
539
+
540
+ if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
541
+ raise ValueError(
542
+ f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
543
+ )
544
+
545
+ if (
546
+ distributed_type
547
+ in [
548
+ DistributedType.MULTI_GPU,
549
+ DistributedType.MULTI_MLU,
550
+ DistributedType.MULTI_NPU,
551
+ DistributedType.MULTI_XPU,
552
+ DistributedType.NO,
553
+ ]
554
+ and not use_cpu
555
+ and not use_mps
556
+ ):
557
+ if is_npu_available():
558
+ machine_type = "NPU(s)"
559
+ elif is_mlu_available():
560
+ machine_type = "MLU(s)"
561
+ else:
562
+ machine_type = "GPU(s)"
563
+ gpu_ids = _ask_field(
564
+ f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
565
+ default="all",
566
+ )
567
+
568
+ # CPU affinity is only supported on NVIDIA hardware for now
569
+ enable_cpu_affinity = False
570
+ if distributed_type == (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
571
+ enable_cpu_affinity = _ask_field(
572
+ "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
573
+ _convert_yes_no_to_bool,
574
+ default=False,
575
+ error_message="Please enter yes or no.",
576
+ )
577
+
578
+ if distributed_type == DistributedType.XLA:
579
+ mixed_precision = "no"
580
+ main_training_function = _ask_field(
581
+ "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
582
+ default="main",
583
+ )
584
+ tpu_use_cluster = _ask_field(
585
+ "Are you using a TPU cluster? [yes/NO]: ",
586
+ _convert_yes_no_to_bool,
587
+ default=False,
588
+ error_message="Please enter yes or no.",
589
+ )
590
+ if tpu_use_cluster:
591
+ tpu_name = _ask_field(
592
+ "What is the name of your TPU cluster? ",
593
+ default=None,
594
+ error_message="Please enter the name of your TPU cluster.",
595
+ )
596
+ tpu_zone = _ask_field(
597
+ "What is the zone of your TPU cluster? ",
598
+ default=None,
599
+ error_message="Please enter the zone of your TPU cluster.",
600
+ )
601
+ tpu_use_sudo = _ask_field(
602
+ "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ",
603
+ default=False,
604
+ error_message="Please enter yes or no.",
605
+ )
606
+ run_commands = _ask_field(
607
+ "Do you have code you wish to run on startup in each pod? [yes/NO]: ",
608
+ _convert_yes_no_to_bool,
609
+ default=False,
610
+ error_message="Please enter yes or no.",
611
+ )
612
+ if run_commands:
613
+ use_command_file = _ask_field(
614
+ "Is this code located in a bash script? [yes/NO]: ",
615
+ _convert_yes_no_to_bool,
616
+ default=False,
617
+ error_message="Please enter yes or no.",
618
+ )
619
+ if use_command_file:
620
+ tpu_command_file = _ask_field(
621
+ "What is the path to your bash script? ",
622
+ default=None,
623
+ error_message="Please enter the path to your bash script.",
624
+ )
625
+ tpu_command_file = os.path.abspath(tpu_command_file)
626
+ else:
627
+ print("Please enter each command seperately you wish to run on startup in each pod.")
628
+ tpu_commands = []
629
+ another_command = True
630
+ while another_command:
631
+ tpu_commands.append(
632
+ _ask_field(
633
+ "Please enter a single command to be ran ",
634
+ default=None,
635
+ error_message="Please enter the commands you wish to run on startup in each pod as a single string.",
636
+ )
637
+ )
638
+ another_command = _ask_field(
639
+ "Do you wish to add another command? [yes/NO]: ",
640
+ _convert_yes_no_to_bool,
641
+ default=False,
642
+ error_message="Please enter yes or no.",
643
+ )
644
+ tpu_vm = _ask_field(
645
+ "If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ",
646
+ default="",
647
+ ).split(",")
648
+ tpu_env = _ask_field(
649
+ "What environment variables do you wish to set in each pod, seperated by a comma: ",
650
+ default="",
651
+ ).split(",")
652
+
653
+ else:
654
+ main_training_function = "main"
655
+ if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
656
+ mixed_precision = None
657
+ else:
658
+ mixed_precision = _ask_options(
659
+ "Do you wish to use FP16 or BF16 (mixed precision)?",
660
+ ["no", "fp16", "bf16", "fp8"],
661
+ _convert_mixed_precision,
662
+ )
663
+
664
+ if use_dynamo and mixed_precision == "no" and not use_cpu:
665
+ print(
666
+ "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
667
+ )
668
+
669
+ if distributed_type == DistributedType.XLA and mixed_precision == "bf16":
670
+ tpu_downcast_bf16 = _ask_field(
671
+ "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
672
+ )
673
+
674
+ return ClusterConfig(
675
+ compute_environment=ComputeEnvironment.LOCAL_MACHINE,
676
+ distributed_type=distributed_type,
677
+ num_processes=num_processes,
678
+ gpu_ids=gpu_ids,
679
+ mixed_precision=mixed_precision,
680
+ downcast_bf16=tpu_downcast_bf16,
681
+ machine_rank=machine_rank,
682
+ num_machines=num_machines,
683
+ main_process_ip=main_process_ip,
684
+ main_process_port=main_process_port,
685
+ main_training_function=main_training_function,
686
+ deepspeed_config=deepspeed_config,
687
+ fsdp_config=fsdp_config,
688
+ megatron_lm_config=megatron_lm_config,
689
+ ipex_config=ipex_config,
690
+ mpirun_config=mpirun_config,
691
+ use_cpu=use_cpu,
692
+ rdzv_backend=rdzv_backend,
693
+ same_network=same_network,
694
+ commands=tpu_commands,
695
+ command_file=tpu_command_file,
696
+ tpu_env=tpu_env,
697
+ tpu_name=tpu_name,
698
+ tpu_vm=tpu_vm,
699
+ tpu_zone=tpu_zone,
700
+ tpu_use_sudo=tpu_use_sudo,
701
+ tpu_use_cluster=tpu_use_cluster,
702
+ dynamo_config=dynamo_config,
703
+ debug=debug,
704
+ enable_cpu_affinity=enable_cpu_affinity,
705
+ )
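
`get_cluster_input()` above walks through interactive prompts and returns a `ClusterConfig` (defined in `config_args.py` below). A minimal non-interactive sketch of an equivalent single-process config; the field values and output path are illustrative, not prescriptive:

from accelerate.commands.config.config_args import ClusterConfig

# Roughly what get_cluster_input() returns after answering "No distributed
# training" to the first prompt and keeping the defaults elsewhere.
config = ClusterConfig(
    compute_environment="LOCAL_MACHINE",
    distributed_type="NO",
    mixed_precision="no",
    use_cpu=False,
    debug=False,
    num_processes=1,
)
config.to_yaml_file("/tmp/accelerate_single_process.yaml")  # hypothetical path
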
venv/lib/python3.10/site-packages/accelerate/commands/config/config.py ADDED
@@ -0,0 +1,89 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import os
+
+ from accelerate.utils import ComputeEnvironment
+
+ from .cluster import get_cluster_input
+ from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
+ from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
+ from .sagemaker import get_sagemaker_input
+
+
+ description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
+
+
+ def get_user_input():
+     compute_environment = _ask_options(
+         "In which compute environment are you running?",
+         ["This machine", "AWS (Amazon SageMaker)"],
+         _convert_compute_environment,
+     )
+     if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+         config = get_sagemaker_input()
+     else:
+         config = get_cluster_input()
+     return config
+
+
+ def config_command_parser(subparsers=None):
+     if subparsers is not None:
+         parser = subparsers.add_parser("config", description=description)
+     else:
+         parser = argparse.ArgumentParser("Accelerate config command", description=description)
+
+     parser.add_argument(
+         "--config_file",
+         default=None,
+         help=(
+             "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+             "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+             "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+             "with 'huggingface'."
+         ),
+     )
+
+     if subparsers is not None:
+         parser.set_defaults(func=config_command)
+     return parser
+
+
+ def config_command(args):
+     config = get_user_input()
+     if args.config_file is not None:
+         config_file = args.config_file
+     else:
+         if not os.path.isdir(cache_dir):
+             os.makedirs(cache_dir)
+         config_file = default_yaml_config_file
+
+     if config_file.endswith(".json"):
+         config.to_json_file(config_file)
+     else:
+         config.to_yaml_file(config_file)
+     print(f"accelerate configuration saved at {config_file}")
+
+
+ def main():
+     parser = config_command_parser()
+     args = parser.parse_args()
+     config_command(args)
+
+
+ if __name__ == "__main__":
+     main()
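
`config_command_parser` above accepts a single `--config_file` flag and otherwise defers everything to the interactive prompts. A small sketch of parsing that flag without triggering the prompts; the path is illustrative:

from accelerate.commands.config.config import config_command_parser

parser = config_command_parser()
args = parser.parse_args(["--config_file", "/tmp/my_accelerate_config.yaml"])
print(args.config_file)  # /tmp/my_accelerate_config.yaml
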
venv/lib/python3.10/site-packages/accelerate/commands/config/config_args.py ADDED
@@ -0,0 +1,243 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ import os
19
+ from dataclasses import dataclass
20
+ from enum import Enum
21
+ from typing import List, Optional, Union
22
+
23
+ import yaml
24
+
25
+ from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
26
+ from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
27
+
28
+
29
+ hf_cache_home = os.path.expanduser(
30
+ os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
31
+ )
32
+ cache_dir = os.path.join(hf_cache_home, "accelerate")
33
+ default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
34
+ default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
35
+
36
+ # For backward compatibility: the default config is the json one if it's the only existing file.
37
+ if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
38
+ default_config_file = default_yaml_config_file
39
+ else:
40
+ default_config_file = default_json_config_file
41
+
42
+
43
+ def load_config_from_file(config_file):
44
+ if config_file is not None:
45
+ if not os.path.isfile(config_file):
46
+ raise FileNotFoundError(
47
+ f"The passed configuration file `{config_file}` does not exist. "
48
+ "Please pass an existing file to `accelerate launch`, or use the default one "
49
+ "created through `accelerate config` and run `accelerate launch` "
50
+ "without the `--config_file` argument."
51
+ )
52
+ else:
53
+ config_file = default_config_file
54
+ with open(config_file, encoding="utf-8") as f:
55
+ if config_file.endswith(".json"):
56
+ if (
57
+ json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
58
+ == ComputeEnvironment.LOCAL_MACHINE
59
+ ):
60
+ config_class = ClusterConfig
61
+ else:
62
+ config_class = SageMakerConfig
63
+ return config_class.from_json_file(json_file=config_file)
64
+ else:
65
+ if (
66
+ yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
67
+ == ComputeEnvironment.LOCAL_MACHINE
68
+ ):
69
+ config_class = ClusterConfig
70
+ else:
71
+ config_class = SageMakerConfig
72
+ return config_class.from_yaml_file(yaml_file=config_file)
73
+
74
+
75
+ @dataclass
76
+ class BaseConfig:
77
+ compute_environment: ComputeEnvironment
78
+ distributed_type: Union[DistributedType, SageMakerDistributedType]
79
+ mixed_precision: str
80
+ use_cpu: bool
81
+ debug: bool
82
+
83
+ def to_dict(self):
84
+ result = self.__dict__
85
+ # For serialization, it's best to convert Enums to strings (or their underlying value type).
86
+ for key, value in result.items():
87
+ if isinstance(value, Enum):
88
+ result[key] = value.value
89
+ if isinstance(value, dict) and not bool(value):
90
+ result[key] = None
91
+ result = {k: v for k, v in result.items() if v is not None}
92
+ return result
93
+
94
+ @classmethod
95
+ def from_json_file(cls, json_file=None):
96
+ json_file = default_json_config_file if json_file is None else json_file
97
+ with open(json_file, encoding="utf-8") as f:
98
+ config_dict = json.load(f)
99
+ if "compute_environment" not in config_dict:
100
+ config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
101
+ if "mixed_precision" not in config_dict:
102
+ config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
103
+ if "fp16" in config_dict: # Convert the config to the new format.
104
+ del config_dict["fp16"]
105
+ if "dynamo_backend" in config_dict: # Convert the config to the new format.
106
+ dynamo_backend = config_dict.pop("dynamo_backend")
107
+ config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
108
+ if "use_cpu" not in config_dict:
109
+ config_dict["use_cpu"] = False
110
+ if "debug" not in config_dict:
111
+ config_dict["debug"] = False
112
+ if "enable_cpu_affinity" not in config_dict:
113
+ config_dict["enable_cpu_affinity"] = False
114
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
115
+ if len(extra_keys) > 0:
116
+ raise ValueError(
117
+ f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
118
+ " version or fix (and potentially remove) these keys from your config file."
119
+ )
120
+
121
+ return cls(**config_dict)
122
+
123
+ def to_json_file(self, json_file):
124
+ with open(json_file, "w", encoding="utf-8") as f:
125
+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
126
+ f.write(content)
127
+
128
+ @classmethod
129
+ def from_yaml_file(cls, yaml_file=None):
130
+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
131
+ with open(yaml_file, encoding="utf-8") as f:
132
+ config_dict = yaml.safe_load(f)
133
+ if "compute_environment" not in config_dict:
134
+ config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
135
+ if "mixed_precision" not in config_dict:
136
+ config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
137
+ if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]:
138
+ config_dict["mixed_precision"] = "no"
139
+ if "fp16" in config_dict: # Convert the config to the new format.
140
+ del config_dict["fp16"]
141
+ if "dynamo_backend" in config_dict: # Convert the config to the new format.
142
+ dynamo_backend = config_dict.pop("dynamo_backend")
143
+ config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
144
+ if "use_cpu" not in config_dict:
145
+ config_dict["use_cpu"] = False
146
+ if "debug" not in config_dict:
147
+ config_dict["debug"] = False
148
+ if "enable_cpu_affinity" not in config_dict:
149
+ config_dict["enable_cpu_affinity"] = False
150
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
151
+ if len(extra_keys) > 0:
152
+ raise ValueError(
153
+ f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
154
+ " version or fix (and potentially remove) these keys from your config file."
155
+ )
156
+ return cls(**config_dict)
157
+
158
+ def to_yaml_file(self, yaml_file):
159
+ with open(yaml_file, "w", encoding="utf-8") as f:
160
+ yaml.safe_dump(self.to_dict(), f)
161
+
162
+ def __post_init__(self):
163
+ if isinstance(self.compute_environment, str):
164
+ self.compute_environment = ComputeEnvironment(self.compute_environment)
165
+ if isinstance(self.distributed_type, str):
166
+ if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
167
+ self.distributed_type = SageMakerDistributedType(self.distributed_type)
168
+ else:
169
+ self.distributed_type = DistributedType(self.distributed_type)
170
+ if getattr(self, "dynamo_config", None) is None:
171
+ self.dynamo_config = {}
172
+
173
+
174
+ @dataclass
175
+ class ClusterConfig(BaseConfig):
176
+ num_processes: int
177
+ machine_rank: int = 0
178
+ num_machines: int = 1
179
+ gpu_ids: Optional[str] = None
180
+ main_process_ip: Optional[str] = None
181
+ main_process_port: Optional[int] = None
182
+ rdzv_backend: Optional[str] = "static"
183
+ same_network: Optional[bool] = False
184
+ main_training_function: str = "main"
185
+ enable_cpu_affinity: bool = False
186
+
187
+ # args for deepspeed_plugin
188
+ deepspeed_config: dict = None
189
+ # args for fsdp
190
+ fsdp_config: dict = None
191
+ # args for megatron_lm
192
+ megatron_lm_config: dict = None
193
+ # args for ipex
194
+ ipex_config: dict = None
195
+ # args for mpirun
196
+ mpirun_config: dict = None
197
+ # args for TPU
198
+ downcast_bf16: bool = False
199
+
200
+ # args for TPU pods
201
+ tpu_name: str = None
202
+ tpu_zone: str = None
203
+ tpu_use_cluster: bool = False
204
+ tpu_use_sudo: bool = False
205
+ command_file: str = None
206
+ commands: List[str] = None
207
+ tpu_vm: List[str] = None
208
+ tpu_env: List[str] = None
209
+
210
+ # args for dynamo
211
+ dynamo_config: dict = None
212
+
213
+ def __post_init__(self):
214
+ if self.deepspeed_config is None:
215
+ self.deepspeed_config = {}
216
+ if self.fsdp_config is None:
217
+ self.fsdp_config = {}
218
+ if self.megatron_lm_config is None:
219
+ self.megatron_lm_config = {}
220
+ if self.ipex_config is None:
221
+ self.ipex_config = {}
222
+ if self.mpirun_config is None:
223
+ self.mpirun_config = {}
224
+ return super().__post_init__()
225
+
226
+
227
+ @dataclass
228
+ class SageMakerConfig(BaseConfig):
229
+ ec2_instance_type: str
230
+ iam_role_name: str
231
+ image_uri: Optional[str] = None
232
+ profile: Optional[str] = None
233
+ region: str = "us-east-1"
234
+ num_machines: int = 1
235
+ gpu_ids: str = "all"
236
+ base_job_name: str = f"accelerate-sagemaker-{num_machines}"
237
+ pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
238
+ transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
239
+ py_version: str = SAGEMAKER_PYTHON_VERSION
240
+ sagemaker_inputs_file: str = None
241
+ sagemaker_metrics_file: str = None
242
+ additional_args: dict = None
243
+ dynamo_config: dict = None
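
`load_config_from_file` above dispatches on the `compute_environment` key and the file extension to pick `ClusterConfig` or `SageMakerConfig`. A small sketch of loading a previously written YAML config; the path is illustrative (e.g. one produced by `ClusterConfig.to_yaml_file`), and with no argument the function falls back to `default_config_file`:

from accelerate.commands.config.config_args import load_config_from_file

config = load_config_from_file("/tmp/accelerate_single_process.yaml")
print(type(config).__name__, config.distributed_type, config.num_processes)
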
venv/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py ADDED
@@ -0,0 +1,101 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+
+ from ...utils.dataclasses import (
+     ComputeEnvironment,
+     DistributedType,
+     DynamoBackend,
+     PrecisionType,
+     SageMakerDistributedType,
+ )
+ from ..menu import BulletMenu
+
+
+ DYNAMO_BACKENDS = [
+     "EAGER",
+     "AOT_EAGER",
+     "INDUCTOR",
+     "AOT_TS_NVFUSER",
+     "NVPRIMS_NVFUSER",
+     "CUDAGRAPHS",
+     "OFI",
+     "FX2TRT",
+     "ONNXRT",
+     "TENSORRT",
+     "IPEX",
+     "TVM",
+ ]
+
+
+ def _ask_field(input_text, convert_value=None, default=None, error_message=None):
+     ask_again = True
+     while ask_again:
+         result = input(input_text)
+         try:
+             if default is not None and len(result) == 0:
+                 return default
+             return convert_value(result) if convert_value is not None else result
+         except Exception:
+             if error_message is not None:
+                 print(error_message)
+
+
+ def _ask_options(input_text, options=[], convert_value=None, default=0):
+     menu = BulletMenu(input_text, options)
+     result = menu.run(default_choice=default)
+     return convert_value(result) if convert_value is not None else result
+
+
+ def _convert_compute_environment(value):
+     value = int(value)
+     return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
+
+
+ def _convert_distributed_mode(value):
+     value = int(value)
+     return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "XLA"][value])
+
+
+ def _convert_dynamo_backend(value):
+     value = int(value)
+     return DynamoBackend(DYNAMO_BACKENDS[value]).value
+
+
+ def _convert_mixed_precision(value):
+     value = int(value)
+     return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
+
+
+ def _convert_sagemaker_distributed_mode(value):
+     value = int(value)
+     return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
+
+
+ def _convert_yes_no_to_bool(value):
+     return {"yes": True, "no": False}[value.lower()]
+
+
+ class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
+     """
+     A custom formatter that will remove the usage line from the help message for subcommands.
+     """
+
+     def _format_usage(self, usage, actions, groups, prefix):
+         usage = super()._format_usage(usage, actions, groups, prefix)
+         usage = usage.replace("<command> [<args>] ", "")
+         return usage
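
The `_convert_*` helpers above map a numeric menu choice or a yes/no answer onto the corresponding enum or bool. A couple of illustrative calls, assuming `accelerate` is importable:

from accelerate.commands.config.config_utils import (
    _convert_distributed_mode,
    _convert_yes_no_to_bool,
)

print(_convert_yes_no_to_bool("YES"))   # True
print(_convert_distributed_mode("3"))   # DistributedType.MULTI_GPU
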
venv/lib/python3.10/site-packages/accelerate/commands/config/default.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ import torch
20
+
21
+ from ...utils import is_mlu_available, is_npu_available, is_xpu_available
22
+ from .config_args import ClusterConfig, default_json_config_file
23
+ from .config_utils import SubcommandHelpFormatter
24
+
25
+
26
+ description = "Create a default config file for Accelerate with only a few flags set."
27
+
28
+
29
+ def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
30
+ """
31
+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
32
+ set CPU if it is a CPU-only machine.
33
+
34
+ Args:
35
+ mixed_precision (`str`, *optional*, defaults to "no"):
36
+ Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
37
+ save_location (`str`, *optional*, defaults to `default_json_config_file`):
38
+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
39
+ location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting
40
+ the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.
41
+ use_xpu (`bool`, *optional*, defaults to `False`):
42
+ Whether to use XPU if available.
43
+ """
44
+ path = Path(save_location)
45
+ path.parent.mkdir(parents=True, exist_ok=True)
46
+ if path.exists():
47
+ print(
48
+ f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
49
+ )
50
+ return False
51
+ mixed_precision = mixed_precision.lower()
52
+ if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
53
+ raise ValueError(
54
+ f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
55
+ )
56
+ config = {
57
+ "compute_environment": "LOCAL_MACHINE",
58
+ "mixed_precision": mixed_precision,
59
+ }
60
+ if is_mlu_available():
61
+ num_mlus = torch.mlu.device_count()
62
+ config["num_processes"] = num_mlus
63
+ config["use_cpu"] = False
64
+ if num_mlus > 1:
65
+ config["distributed_type"] = "MULTI_MLU"
66
+ else:
67
+ config["distributed_type"] = "NO"
68
+ elif torch.cuda.is_available():
69
+ num_gpus = torch.cuda.device_count()
70
+ config["num_processes"] = num_gpus
71
+ config["use_cpu"] = False
72
+ if num_gpus > 1:
73
+ config["distributed_type"] = "MULTI_GPU"
74
+ else:
75
+ config["distributed_type"] = "NO"
76
+ elif is_xpu_available() and use_xpu:
77
+ num_xpus = torch.xpu.device_count()
78
+ config["num_processes"] = num_xpus
79
+ config["use_cpu"] = False
80
+ if num_xpus > 1:
81
+ config["distributed_type"] = "MULTI_XPU"
82
+ else:
83
+ config["distributed_type"] = "NO"
84
+ elif is_npu_available():
85
+ num_npus = torch.npu.device_count()
86
+ config["num_processes"] = num_npus
87
+ config["use_cpu"] = False
88
+ if num_npus > 1:
89
+ config["distributed_type"] = "MULTI_NPU"
90
+ else:
91
+ config["distributed_type"] = "NO"
92
+ else:
93
+ num_xpus = 0
94
+ config["use_cpu"] = True
95
+ config["num_processes"] = 1
96
+ config["distributed_type"] = "NO"
97
+ config["debug"] = False
98
+ config = ClusterConfig(**config)
99
+ config.to_json_file(path)
100
+ return path
101
+
102
+
103
+ def default_command_parser(parser, parents):
104
+ parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
105
+ parser.add_argument(
106
+ "--config_file",
107
+ default=default_json_config_file,
108
+ help=(
109
+ "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
110
+ "location, which is the content of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have "
111
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
112
+ "with 'huggingface'."
113
+ ),
114
+ dest="save_location",
115
+ )
116
+
117
+ parser.add_argument(
118
+ "--mixed_precision",
119
+ choices=["no", "fp16", "bf16"],
120
+ type=str,
121
+ help="Whether or not to use mixed precision training. "
122
+ "Choose between FP16 and BF16 (bfloat16) training. "
123
+ "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
124
+ default="no",
125
+ )
126
+ parser.set_defaults(func=default_config_command)
127
+ return parser
128
+
129
+
130
+ def default_config_command(args):
131
+ config_file = write_basic_config(args.mixed_precision, args.save_location)
132
+ if config_file:
133
+ print(f"accelerate configuration saved at {config_file}")
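`write_basic_config` can also be called directly, which is what `accelerate config default` does under the hood. A minimal sketch (the save location and script name below are illustrative):

from accelerate.commands.config.default import write_basic_config

# Returns the written path on success, or False if a config already exists at that location.
config_path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_config.json")
if config_path:
    print(f"Pass it to the launcher: accelerate launch --config_file {config_path} train.py")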
venv/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py ADDED
@@ -0,0 +1,267 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import json
17
+ import os
18
+
19
+ from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
20
+ from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
21
+ from ...utils.imports import is_boto3_available
22
+ from .config_args import SageMakerConfig
23
+ from .config_utils import (
24
+ DYNAMO_BACKENDS,
25
+ _ask_field,
26
+ _ask_options,
27
+ _convert_dynamo_backend,
28
+ _convert_mixed_precision,
29
+ _convert_sagemaker_distributed_mode,
30
+ _convert_yes_no_to_bool,
31
+ )
32
+
33
+
34
+ if is_boto3_available():
35
+ import boto3 # noqa: F401
36
+
37
+
38
+ def _create_iam_role_for_sagemaker(role_name):
39
+ iam_client = boto3.client("iam")
40
+
41
+ sagemaker_trust_policy = {
42
+ "Version": "2012-10-17",
43
+ "Statement": [
44
+ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
45
+ ],
46
+ }
47
+ try:
48
+ # create the role, associated with the chosen trust policy
49
+ iam_client.create_role(
50
+ RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
51
+ )
52
+ policy_document = {
53
+ "Version": "2012-10-17",
54
+ "Statement": [
55
+ {
56
+ "Effect": "Allow",
57
+ "Action": [
58
+ "sagemaker:*",
59
+ "ecr:GetDownloadUrlForLayer",
60
+ "ecr:BatchGetImage",
61
+ "ecr:BatchCheckLayerAvailability",
62
+ "ecr:GetAuthorizationToken",
63
+ "cloudwatch:PutMetricData",
64
+ "cloudwatch:GetMetricData",
65
+ "cloudwatch:GetMetricStatistics",
66
+ "cloudwatch:ListMetrics",
67
+ "logs:CreateLogGroup",
68
+ "logs:CreateLogStream",
69
+ "logs:DescribeLogStreams",
70
+ "logs:PutLogEvents",
71
+ "logs:GetLogEvents",
72
+ "s3:CreateBucket",
73
+ "s3:ListBucket",
74
+ "s3:GetBucketLocation",
75
+ "s3:GetObject",
76
+ "s3:PutObject",
77
+ ],
78
+ "Resource": "*",
79
+ }
80
+ ],
81
+ }
82
+ # attach policy to role
83
+ iam_client.put_role_policy(
84
+ RoleName=role_name,
85
+ PolicyName=f"{role_name}_policy_permission",
86
+ PolicyDocument=json.dumps(policy_document, indent=2),
87
+ )
88
+ except iam_client.exceptions.EntityAlreadyExistsException:
89
+ print(f"role {role_name} already exists. Using existing one")
90
+
91
+
92
+ def _get_iam_role_arn(role_name):
93
+ iam_client = boto3.client("iam")
94
+ return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
95
+
96
+
97
+ def get_sagemaker_input():
98
+ credentials_configuration = _ask_options(
99
+ "How do you want to authorize?",
100
+ ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
101
+ int,
102
+ )
103
+ aws_profile = None
104
+ if credentials_configuration == 0:
105
+ aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
106
+ os.environ["AWS_PROFILE"] = aws_profile
107
+ else:
108
+ print(
109
+ "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with "
110
+ "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
111
+ )
112
+ aws_access_key_id = _ask_field("AWS Access Key ID: ")
113
+ os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
114
+
115
+ aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
116
+ os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
117
+
118
+ aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
119
+ os.environ["AWS_DEFAULT_REGION"] = aws_region
120
+
121
+ role_management = _ask_options(
122
+ "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
123
+ ["Provide IAM Role name", "Create new IAM role using credentials"],
124
+ int,
125
+ )
126
+ if role_management == 0:
127
+ iam_role_name = _ask_field("Enter your IAM role name: ")
128
+ else:
129
+ iam_role_name = "accelerate_sagemaker_execution_role"
130
+ print(f'Accelerate will create an IAM role "{iam_role_name}" using the provided credentials')
131
+ _create_iam_role_for_sagemaker(iam_role_name)
132
+
133
+ is_custom_docker_image = _ask_field(
134
+ "Do you want to use a custom Docker image? [yes/NO]: ",
135
+ _convert_yes_no_to_bool,
136
+ default=False,
137
+ error_message="Please enter yes or no.",
138
+ )
139
+ docker_image = None
140
+ if is_custom_docker_image:
141
+ docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
142
+
143
+ is_sagemaker_inputs_enabled = _ask_field(
144
+ "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
145
+ _convert_yes_no_to_bool,
146
+ default=False,
147
+ error_message="Please enter yes or no.",
148
+ )
149
+ sagemaker_inputs_file = None
150
+ if is_sagemaker_inputs_enabled:
151
+ sagemaker_inputs_file = _ask_field(
152
+ "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
153
+ lambda x: str(x).lower(),
154
+ )
155
+
156
+ is_sagemaker_metrics_enabled = _ask_field(
157
+ "Do you want to enable SageMaker metrics? [yes/NO]: ",
158
+ _convert_yes_no_to_bool,
159
+ default=False,
160
+ error_message="Please enter yes or no.",
161
+ )
162
+ sagemaker_metrics_file = None
163
+ if is_sagemaker_metrics_enabled:
164
+ sagemaker_metrics_file = _ask_field(
165
+ "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
166
+ lambda x: str(x).lower(),
167
+ )
168
+
169
+ distributed_type = _ask_options(
170
+ "What is the distributed mode?",
171
+ ["No distributed training", "Data parallelism"],
172
+ _convert_sagemaker_distributed_mode,
173
+ )
174
+ dynamo_config = {}
175
+ use_dynamo = _ask_field(
176
+ "Do you wish to optimize your script with torch dynamo? [yes/NO]: ",
177
+ _convert_yes_no_to_bool,
178
+ default=False,
179
+ error_message="Please enter yes or no.",
180
+ )
181
+ if use_dynamo:
182
+ prefix = "dynamo_"
183
+ dynamo_config[prefix + "backend"] = _ask_options(
184
+ "Which dynamo backend would you like to use?",
185
+ [x.lower() for x in DYNAMO_BACKENDS],
186
+ _convert_dynamo_backend,
187
+ default=2,
188
+ )
189
+ use_custom_options = _ask_field(
190
+ "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
191
+ _convert_yes_no_to_bool,
192
+ default=False,
193
+ error_message="Please enter yes or no.",
194
+ )
195
+
196
+ if use_custom_options:
197
+ dynamo_config[prefix + "mode"] = _ask_options(
198
+ "Which mode do you want to use?",
199
+ TORCH_DYNAMO_MODES,
200
+ lambda x: TORCH_DYNAMO_MODES[int(x)],
201
+ default="default",
202
+ )
203
+ dynamo_config[prefix + "use_fullgraph"] = _ask_field(
204
+ "Do you want the fullgraph mode or is it ok to break the model into several subgraphs? [yes/NO]: ",
205
+ _convert_yes_no_to_bool,
206
+ default=False,
207
+ error_message="Please enter yes or no.",
208
+ )
209
+ dynamo_config[prefix + "use_dynamic"] = _ask_field(
210
+ "Do you want to enable dynamic shape tracing? [yes/NO]: ",
211
+ _convert_yes_no_to_bool,
212
+ default=False,
213
+ error_message="Please enter yes or no.",
214
+ )
215
+ ec2_instance_query = "Which EC2 instance type do you want to use for your training?"
216
+ if distributed_type != SageMakerDistributedType.NO:
217
+ ec2_instance_type = _ask_options(
218
+ ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
219
+ )
220
+ else:
221
+ ec2_instance_query += " [ml.p3.2xlarge]:"
222
+ ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
223
+
224
+ debug = False
225
+ if distributed_type != SageMakerDistributedType.NO:
226
+ debug = _ask_field(
227
+ "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
228
+ _convert_yes_no_to_bool,
229
+ default=False,
230
+ error_message="Please enter yes or no.",
231
+ )
232
+
233
+ num_machines = 1
234
+ if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
235
+ num_machines = _ask_field(
236
+ "How many machines do you want to use? [1]: ",
237
+ int,
238
+ default=1,
239
+ )
240
+
241
+ mixed_precision = _ask_options(
242
+ "Do you wish to use FP16 or BF16 (mixed precision)?",
243
+ ["no", "fp16", "bf16", "fp8"],
244
+ _convert_mixed_precision,
245
+ )
246
+
247
+ if use_dynamo and mixed_precision == "no":
248
+ print(
249
+ "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
250
+ )
251
+
252
+ return SageMakerConfig(
253
+ image_uri=docker_image,
254
+ compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
255
+ distributed_type=distributed_type,
256
+ use_cpu=False,
257
+ dynamo_config=dynamo_config,
258
+ ec2_instance_type=ec2_instance_type,
259
+ profile=aws_profile,
260
+ region=aws_region,
261
+ iam_role_name=iam_role_name,
262
+ mixed_precision=mixed_precision,
263
+ num_machines=num_machines,
264
+ sagemaker_inputs_file=sagemaker_inputs_file,
265
+ sagemaker_metrics_file=sagemaker_metrics_file,
266
+ debug=debug,
267
+ )
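`get_sagemaker_input()` walks through the questionnaire above and returns a `SageMakerConfig`. A non-interactive sketch that assembles an equivalent config by hand; the keyword arguments mirror the ones used in the return statement above, and every concrete value (instance type, region, role name, output path) is illustrative:

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils.dataclasses import ComputeEnvironment, SageMakerDistributedType

config = SageMakerConfig(
    image_uri=None,
    compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
    distributed_type=SageMakerDistributedType.DATA_PARALLEL,
    use_cpu=False,
    dynamo_config={},
    ec2_instance_type="ml.p3dn.24xlarge",
    profile="default",
    region="us-east-1",
    iam_role_name="accelerate_sagemaker_execution_role",
    mixed_precision="fp16",
    num_machines=2,
    sagemaker_inputs_file=None,
    sagemaker_metrics_file=None,
    debug=False,
)
config.to_json_file("/tmp/sagemaker_config.json")  # illustrative path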
venv/lib/python3.10/site-packages/accelerate/commands/config/update.py ADDED
@@ -0,0 +1,63 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ from .config_args import default_config_file, load_config_from_file
20
+ from .config_utils import SubcommandHelpFormatter
21
+
22
+
23
+ description = "Update an existing config file with the latest defaults while maintaining the old configuration."
24
+
25
+
26
+ def update_config(args):
27
+ """
28
+ Update an existing config file with the latest defaults while maintaining the old configuration.
29
+ """
30
+ config_file = args.config_file
31
+ if config_file is None and Path(default_config_file).exists():
32
+ config_file = default_config_file
33
+ elif not Path(config_file).exists():
34
+ raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
35
+ config = load_config_from_file(config_file)
36
+
37
+ if config_file.endswith(".json"):
38
+ config.to_json_file(config_file)
39
+ else:
40
+ config.to_yaml_file(config_file)
41
+ return config_file
42
+
43
+
44
+ def update_command_parser(parser, parents):
45
+ parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
46
+ parser.add_argument(
47
+ "--config_file",
48
+ default=None,
49
+ help=(
50
+ "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
51
+ "location, which is the content of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have "
52
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
53
+ "with 'huggingface'."
54
+ ),
55
+ )
56
+
57
+ parser.set_defaults(func=update_config_command)
58
+ return parser
59
+
60
+
61
+ def update_config_command(args):
62
+ config_file = update_config(args)
63
+ print(f"Successfully updated the configuration file at {config_file}.")
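A minimal sketch of the same round trip `accelerate config update` performs: load an existing config and rewrite it so fields added in newer releases pick up their defaults (the path below is illustrative):

from accelerate.commands.config.config_args import load_config_from_file

config_file = "/home/me/.cache/huggingface/accelerate/default_config.yaml"  # illustrative path
config = load_config_from_file(config_file)
config.to_yaml_file(config_file)  # for a .json config, use config.to_json_file(config_file)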
venv/lib/python3.10/site-packages/accelerate/test_utils/__init__.py ADDED
@@ -0,0 +1,50 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from .testing import (
15
+ DEFAULT_LAUNCH_COMMAND,
16
+ are_the_same_tensors,
17
+ assert_exception,
18
+ device_count,
19
+ execute_subprocess_async,
20
+ get_launch_command,
21
+ memory_allocated_func,
22
+ path_in_accelerate_package,
23
+ require_bnb,
24
+ require_cpu,
25
+ require_cuda,
26
+ require_huggingface_suite,
27
+ require_mlu,
28
+ require_mps,
29
+ require_multi_device,
30
+ require_multi_gpu,
31
+ require_multi_xpu,
32
+ require_non_cpu,
33
+ require_non_torch_xla,
34
+ require_non_xpu,
35
+ require_npu,
36
+ require_pippy,
37
+ require_single_device,
38
+ require_single_gpu,
39
+ require_single_xpu,
40
+ require_torch_min_version,
41
+ require_tpu,
42
+ require_xpu,
43
+ skip,
44
+ slow,
45
+ torch_device,
46
+ )
47
+ from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
48
+
49
+
50
+ from .scripts import test_script, test_sync, test_ops # isort: skip
venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc ADDED
Binary file (5.25 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc ADDED
Binary file (20.3 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc ADDED
Binary file (4.22 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/examples.py ADDED
@@ -0,0 +1,146 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each
18
+ `examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the
19
+ others are used to either get the code that matters, or to preprocess them (such as stripping comments)
20
+ """
21
+
22
+ import os
23
+ from typing import List
24
+
25
+
26
+ def get_function_contents_by_name(lines: List[str], name: str):
27
+ """
28
+ Extracts a function from `lines` of segmented source code with the name `name`.
29
+
30
+ Args:
31
+ lines (`List[str]`):
32
+ Source code of a script separated by line.
33
+ name (`str`):
34
+ The name of the function to extract. Should be either `training_function` or `main`
35
+ """
36
+ if name != "training_function" and name != "main":
37
+ raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
38
+ good_lines, found_start = [], False
39
+ for line in lines:
40
+ if not found_start and f"def {name}" in line:
41
+ found_start = True
42
+ good_lines.append(line)
43
+ continue
44
+ if found_start:
45
+ if name == "training_function" and "def main" in line:
46
+ return good_lines
47
+ if name == "main" and "if __name__" in line:
48
+ return good_lines
49
+ good_lines.append(line)
50
+
51
+
52
+ def clean_lines(lines: List[str]):
53
+ """
54
+ Filters `lines` and removes any entries that start with a comment ('#') or are just a newline ('\n')
55
+
56
+ Args:
57
+ lines (`List[str]`):
58
+ Source code of a script separated by line.
59
+ """
60
+ return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"]
61
+
62
+
63
+ def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):
64
+ """
65
+ Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be
66
+ used when testing to see if `complete_*_.py` examples have all of the implementations from each of the
67
+ `examples/by_feature/*` scripts.
68
+
69
+ It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code
70
+ is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the
71
+ `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter.
72
+
73
+ Args:
74
+ base_filename (`str` or `os.PathLike`):
75
+ The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py`
76
+ feature_filename (`str` or `os.PathLike`):
77
+ The filepath of a single feature example script. The contents of this script are checked to see if they
78
+ exist in `base_filename`
79
+ parser_only (`bool`):
80
+ Whether to compare only the `main()` sections in both files, or to compare the contents of
81
+ `training_function()`
82
+ secondary_filename (`str`, *optional*):
83
+ A potential secondary filepath that should be included in the check. This function extracts the base
84
+ functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than
85
+ `complete_nlp_example.py`, the template script should be included here. Such as `examples/cv_example.py`
86
+ """
87
+ with open(base_filename) as f:
88
+ base_file_contents = f.readlines()
89
+ with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f:
90
+ full_file_contents = f.readlines()
91
+ with open(feature_filename) as f:
92
+ feature_file_contents = f.readlines()
93
+ if secondary_filename is not None:
94
+ with open(secondary_filename) as f:
95
+ secondary_file_contents = f.readlines()
96
+
97
+ # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content
98
+ if parser_only:
99
+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main"))
100
+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main"))
101
+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main"))
102
+ if secondary_filename is not None:
103
+ secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main"))
104
+ else:
105
+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function"))
106
+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function"))
107
+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function"))
108
+ if secondary_filename is not None:
109
+ secondary_file_func = clean_lines(
110
+ get_function_contents_by_name(secondary_file_contents, "training_function")
111
+ )
112
+
113
+ _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n"
114
+
115
+ # Specific code in our script that differs from the full version, aka what is new
116
+ new_feature_code = []
117
+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement
118
+ it = iter(feature_file_func)
119
+ for i in range(len(feature_file_func) - 1):
120
+ if i not in passed_idxs:
121
+ line = next(it)
122
+ if (line not in full_file_func) and (line.lstrip() != _dl_line):
123
+ if "TESTING_MOCKED_DATALOADERS" not in line:
124
+ new_feature_code.append(line)
125
+ passed_idxs.append(i)
126
+ else:
127
+ # Skip over the `config['num_epochs'] = 2` statement
128
+ _ = next(it)
129
+
130
+ # Extract out just the new parts from the full_file_training_func
131
+ new_full_example_parts = []
132
+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement
133
+ for i, line in enumerate(base_file_func):
134
+ if i not in passed_idxs:
135
+ if (line not in full_file_func) and (line.lstrip() != _dl_line):
136
+ if "TESTING_MOCKED_DATALOADERS" not in line:
137
+ new_full_example_parts.append(line)
138
+ passed_idxs.append(i)
139
+
140
+ # Finally, get the overall diff
141
+ diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts]
142
+ if secondary_filename is not None:
143
+ diff_from_two = [line for line in full_file_contents if line not in secondary_file_func]
144
+ diff_from_example = [line for line in diff_from_example if line not in diff_from_two]
145
+
146
+ return diff_from_example
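A minimal usage sketch for `compare_against_test` (file paths are illustrative and assume the Accelerate repository root as the working directory, since the helper itself opens `examples/nlp_example.py` relative to it):

from accelerate.test_utils.examples import compare_against_test

missing = compare_against_test(
    base_filename="examples/complete_nlp_example.py",
    feature_filename="examples/by_feature/checkpointing.py",
    parser_only=False,  # compare the training_function bodies instead of main()
)
assert not missing, "The complete example is missing feature code:\n" + "".join(missing)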
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc ADDED
Binary file (489 Bytes). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc ADDED
Binary file (6.47 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc ADDED
Binary file (1.51 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc ADDED
Binary file (4.6 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc ADDED
Binary file (20.2 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc ADDED
Binary file (9.05 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (211 Bytes). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc ADDED
Binary file (6.54 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc ADDED
Binary file (9.85 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc ADDED
Binary file (7.17 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc ADDED
Binary file (5.9 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc ADDED
Binary file (2.98 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py ADDED
@@ -0,0 +1,268 @@
1
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import json
16
+ import os
17
+
18
+ import evaluate
19
+ import torch
20
+ from datasets import load_dataset
21
+ from torch.optim import AdamW
22
+ from torch.utils.data import DataLoader
23
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
24
+
25
+ from accelerate import Accelerator, DistributedType
26
+ from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
27
+
28
+
29
+ MAX_GPU_BATCH_SIZE = 16
30
+ EVAL_BATCH_SIZE = 32
31
+
32
+
33
+ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
34
+ """
35
+ Creates a set of `DataLoader`s for the `glue` dataset.
36
+
37
+ Args:
38
+ accelerator (`Accelerator`):
39
+ An `Accelerator` object
40
+ batch_size (`int`, *optional*):
41
+ The batch size for the train and validation DataLoaders.
42
+ model_name (`str`, *optional*):
43
+ """
44
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
45
+ datasets = load_dataset("glue", "mrpc")
46
+
47
+ def tokenize_function(examples):
48
+ # max_length=None => use the model max length (it's actually the default)
49
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
50
+ return outputs
51
+
52
+ # Apply the method we just defined to all the examples in all the splits of the dataset
53
+ tokenized_datasets = datasets.map(
54
+ tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
55
+ )
56
+
57
+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
58
+ # transformers library
59
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
60
+
61
+ def collate_fn(examples):
62
+ # On TPU it's best to pad everything to the same length or training will be very slow.
63
+ if accelerator.distributed_type == DistributedType.XLA:
64
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
65
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
66
+
67
+ # Instantiate dataloaders.
68
+ train_dataloader = DataLoader(
69
+ tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
70
+ )
71
+ eval_dataloader = DataLoader(
72
+ tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
73
+ )
74
+
75
+ return train_dataloader, eval_dataloader
76
+
77
+
78
+ def evaluation_loop(accelerator, model, eval_dataloader, metric):
79
+ model.eval()
80
+ samples_seen = 0
81
+ for step, batch in enumerate(eval_dataloader):
82
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
83
+ batch.to(accelerator.device)
84
+ with torch.no_grad():
85
+ outputs = model(**batch)
86
+ predictions = outputs.logits.argmax(dim=-1)
87
+ # It is slightly faster to call this once, than multiple times
88
+ predictions, references = accelerator.gather(
89
+ (predictions, batch["labels"])
90
+ ) # If we are in a multiprocess environment, the last batch has duplicates
91
+ if accelerator.use_distributed:
92
+ if step == len(eval_dataloader) - 1:
93
+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
94
+ references = references[: len(eval_dataloader.dataset) - samples_seen]
95
+ else:
96
+ samples_seen += references.shape[0]
97
+ metric.add_batch(
98
+ predictions=predictions,
99
+ references=references,
100
+ )
101
+
102
+ eval_metric = metric.compute()
103
+ return eval_metric["accuracy"]
104
+
105
+
106
+ def training_function(config, args):
107
+ # Initialize accelerator
108
+ accelerator = Accelerator()
109
+
110
+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
111
+ lr = config["lr"]
112
+ num_epochs = int(config["num_epochs"])
113
+ seed = int(config["seed"])
114
+ batch_size = int(config["batch_size"])
115
+ model_name = args.model_name_or_path
116
+
117
+ set_seed(seed)
118
+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
119
+
120
+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)
121
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
122
+
123
+ # Instantiate optimizer
124
+ optimizer_cls = (
125
+ AdamW
126
+ if accelerator.state.deepspeed_plugin is None
127
+ or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
128
+ else DummyOptim
129
+ )
130
+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)
131
+
132
+ if accelerator.state.deepspeed_plugin is not None:
133
+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
134
+ "gradient_accumulation_steps"
135
+ ]
136
+ else:
137
+ gradient_accumulation_steps = 1
138
+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
139
+
140
+ # Instantiate scheduler
141
+ if (
142
+ accelerator.state.deepspeed_plugin is None
143
+ or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
144
+ ):
145
+ lr_scheduler = get_linear_schedule_with_warmup(
146
+ optimizer=optimizer,
147
+ num_warmup_steps=0,
148
+ num_training_steps=max_training_steps,
149
+ )
150
+ else:
151
+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
152
+
153
+ # Prepare everything
154
+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
155
+ # prepare method.
156
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
157
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
158
+ )
159
+
160
+ # We need to keep track of how many total steps we have iterated over
161
+ overall_step = 0
162
+ # We also need to keep track of the stating epoch so files are named properly
163
+ starting_epoch = 0
164
+ metric = evaluate.load("glue", "mrpc")
165
+ ending_epoch = num_epochs
166
+
167
+ if args.partial_train_epoch is not None:
168
+ ending_epoch = args.partial_train_epoch
169
+
170
+ if args.resume_from_checkpoint:
171
+ accelerator.load_state(args.resume_from_checkpoint)
172
+ epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
173
+ state_epoch_num = ""
174
+ for char in epoch_string:
175
+ if char.isdigit():
176
+ state_epoch_num += char
177
+ else:
178
+ break
179
+ starting_epoch = int(state_epoch_num) + 1
180
+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
181
+ accelerator.print("resumed checkpoint performance:", accuracy)
182
+ accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
183
+ accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
184
+ with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f:
185
+ resumed_state = json.load(f)
186
+ assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
187
+ assert (
188
+ resumed_state["lr"] == lr_scheduler.get_lr()[0]
189
+ ), "Scheduler learning rate mismatch, loading from checkpoint failed"
190
+ assert (
191
+ resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
192
+ ), "Optimizer learning rate mismatch, loading from checkpoint failed"
193
+ assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
194
+ return
195
+
196
+ # Now we train the model
197
+ state = {}
198
+ for epoch in range(starting_epoch, ending_epoch):
199
+ model.train()
200
+ for step, batch in enumerate(train_dataloader):
201
+ outputs = model(**batch)
202
+ loss = outputs.loss
203
+ loss = loss / gradient_accumulation_steps
204
+ accelerator.backward(loss)
205
+ if step % gradient_accumulation_steps == 0:
206
+ optimizer.step()
207
+ lr_scheduler.step()
208
+ optimizer.zero_grad()
209
+
210
+ overall_step += 1
211
+ output_dir = f"epoch_{epoch}"
212
+ output_dir = os.path.join(args.output_dir, output_dir)
213
+ accelerator.save_state(output_dir)
214
+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
215
+ state["accuracy"] = accuracy
216
+ state["lr"] = lr_scheduler.get_lr()[0]
217
+ state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
218
+ state["epoch"] = epoch
219
+ state["step"] = overall_step
220
+ accelerator.print(f"epoch {epoch}:", state)
221
+
222
+ accelerator.wait_for_everyone()
223
+ if accelerator.is_main_process:
224
+ with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
225
+ json.dump(state, f)
226
+
227
+
228
+ def main():
229
+ parser = argparse.ArgumentParser(description="Simple example of a training script that saves and resumes from checkpoints.")
230
+ parser.add_argument(
231
+ "--model_name_or_path",
232
+ type=str,
233
+ default="bert-base-cased",
234
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
235
+ required=False,
236
+ )
237
+ parser.add_argument(
238
+ "--output_dir",
239
+ type=str,
240
+ default=".",
241
+ help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
242
+ )
243
+ parser.add_argument(
244
+ "--resume_from_checkpoint",
245
+ type=str,
246
+ default=None,
247
+ help="If the training should continue from a checkpoint folder.",
248
+ )
249
+ parser.add_argument(
250
+ "--partial_train_epoch",
251
+ type=int,
252
+ default=None,
253
+ help="If passed, the training will stop after this number of epochs.",
254
+ )
255
+ parser.add_argument(
256
+ "--num_epochs",
257
+ type=int,
258
+ default=2,
259
+ help="Number of train epochs.",
260
+ )
261
+ args = parser.parse_args()
262
+ config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
263
+
264
+ training_function(config, args)
265
+
266
+
267
+ if __name__ == "__main__":
268
+ main()
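A sketch of how this script is typically driven: one full run that writes per-epoch states, then a second run that resumes from one of them. The helpers are assumed to come from `accelerate.test_utils` as exported in this commit's `__init__.py`, `get_launch_command` is assumed to accept launcher flags as keyword arguments, and the process count and output directory are illustrative:

from accelerate.test_utils import execute_subprocess_async, get_launch_command, path_in_accelerate_package

script = path_in_accelerate_package("test_utils", "scripts", "external_deps", "test_checkpointing.py")
cmd = get_launch_command(num_processes=2)  # illustrative process count

# Full run: saves epoch_0/ and epoch_1/ plus state_*.json under the output directory.
execute_subprocess_async(cmd + [str(script), "--output_dir", "/tmp/ckpt_test", "--num_epochs", "2"])
# Resumed run: reloads epoch_0 and checks the restored accuracy and learning rates against state_0.json.
execute_subprocess_async(
    cmd + [str(script), "--output_dir", "/tmp/ckpt_test", "--resume_from_checkpoint", "/tmp/ckpt_test/epoch_0"]
)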
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py ADDED
@@ -0,0 +1,306 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import math
17
+ import os
18
+ from copy import deepcopy
19
+
20
+ import datasets
21
+ import evaluate
22
+ import torch
23
+ import transformers
24
+ from datasets import load_dataset
25
+ from torch.utils.data import DataLoader, IterableDataset
26
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
27
+
28
+ from accelerate import Accelerator, DataLoaderConfiguration, DistributedType
29
+ from accelerate.data_loader import DataLoaderDispatcher
30
+ from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device
31
+ from accelerate.utils import is_torch_xla_available, set_seed
32
+
33
+
34
+ os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
35
+
36
+
37
+ class ListHandler(logging.Handler):
38
+ def __init__(self, *args, **kwargs):
39
+ super().__init__(*args, **kwargs)
40
+ self.logs = []
41
+
42
+ def emit(self, record):
43
+ self.logs.append(record)
44
+
45
+
46
+ def get_basic_setup(accelerator, num_samples=82, batch_size=16):
47
+ "Returns everything needed to perform basic training"
48
+ set_seed(42)
49
+ model = RegressionModel()
50
+ ddp_model = deepcopy(model)
51
+ dset = RegressionDataset(length=num_samples)
52
+ dataloader = DataLoader(dset, batch_size=batch_size)
53
+ model.to(accelerator.device)
54
+ ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
55
+ return model, ddp_model, dataloader
56
+
57
+
58
+ def get_dataloader(accelerator: Accelerator, use_longest=False):
59
+ tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
60
+ dataset = load_dataset("glue", "mrpc", split="validation")
61
+
62
+ def tokenize_function(examples):
63
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
64
+ return outputs
65
+
66
+ with accelerator.main_process_first():
67
+ tokenized_datasets = dataset.map(
68
+ tokenize_function,
69
+ batched=True,
70
+ remove_columns=["idx", "sentence1", "sentence2"],
71
+ )
72
+
73
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
74
+
75
+ def collate_fn(examples):
76
+ if use_longest:
77
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
78
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
79
+
80
+ return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
81
+
82
+
83
+ def get_mrpc_setup(dispatch_batches, split_batches):
84
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, split_batches=split_batches)
85
+ accelerator = Accelerator(dataloader_config=dataloader_config)
86
+ dataloader = get_dataloader(accelerator, not dispatch_batches)
87
+ model = AutoModelForSequenceClassification.from_pretrained(
88
+ "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
89
+ )
90
+ ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
91
+ return {
92
+ "ddp": [ddp_model, ddp_dataloader, torch_device],
93
+ "no": [model, dataloader, accelerator.device],
94
+ }, accelerator
95
+
96
+
97
+ def generate_predictions(model, dataloader, accelerator):
98
+ logits_and_targets = []
99
+ for batch in dataloader:
100
+ input, target = batch.values()
101
+ with torch.no_grad():
102
+ logit = model(input)
103
+ logit, target = accelerator.gather_for_metrics((logit, target))
104
+ logits_and_targets.append((logit, target))
105
+ logits, targs = [], []
106
+ for logit, targ in logits_and_targets:
107
+ logits.append(logit)
108
+ targs.append(targ)
109
+ logits, targs = torch.cat(logits), torch.cat(targs)
110
+ return logits, targs
111
+
112
+
113
+ def test_torch_metrics(
114
+ accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
115
+ ):
116
+ _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
117
+ logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
118
+ assert (
119
+ len(logits) == num_samples
120
+ ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
121
+
122
+
123
+ def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
124
+ metric = evaluate.load("glue", "mrpc")
125
+ setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
126
+ # First do baseline
127
+ model, dataloader, device = setup["no"]
128
+ model.to(device)
129
+ model.eval()
130
+ for batch in dataloader:
131
+ batch.to(device)
132
+ with torch.inference_mode():
133
+ outputs = model(**batch)
134
+ preds = outputs.logits.argmax(dim=-1)
135
+ metric.add_batch(predictions=preds, references=batch["labels"])
136
+ baseline = metric.compute()
137
+
138
+ # Then do distributed
139
+ model, dataloader, device = setup["ddp"]
140
+ model.eval()
141
+ for batch in dataloader:
142
+ with torch.inference_mode():
143
+ outputs = model(**batch)
144
+ preds = outputs.logits.argmax(dim=-1)
145
+ references = batch["labels"]
146
+ preds, references = accelerator.gather_for_metrics((preds, references))
147
+ metric.add_batch(predictions=preds, references=references)
148
+ distributed = metric.compute()
149
+
150
+ for key in "accuracy f1".split():
151
+ assert math.isclose(
152
+ baseline[key], distributed[key]
153
+ ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
154
+
155
+
156
+ def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
157
+ class DummyIterableDataset(IterableDataset):
158
+ def __init__(self, data):
159
+ self.data = data
160
+
161
+ def __len__(self):
162
+ return len(self.data)
163
+
164
+ def __iter__(self):
165
+ yield from self.data
166
+
167
+ iterable_dataset = DummyIterableDataset([n for n in range(30)])
168
+ dataloader = DataLoader(iterable_dataset, batch_size=4)
169
+ accelerator = Accelerator()
170
+ prepared_dataloader = accelerator.prepare(dataloader)
171
+
172
+ if accelerator.is_main_process:
173
+ logger = logging.root.manager.loggerDict["accelerate.accelerator"]
174
+ list_handler = ListHandler()
175
+ logger.addHandler(list_handler)
176
+
177
+ batches_for_metrics = []
178
+ for batch in prepared_dataloader:
179
+ batches_for_metrics.append(accelerator.gather_for_metrics(batch))
180
+
181
+ assert torch.cat(batches_for_metrics).size(0) == 30
182
+
183
+ if accelerator.is_main_process:
184
+ assert len(list_handler.logs) == 0
185
+ logger.removeHandler(list_handler)
186
+
187
+
188
+ def test_gather_for_metrics_with_iterable_dataset():
189
+ class DummyIterableDataset(IterableDataset):
190
+ def __init__(self, data):
191
+ self.data = data
192
+
193
+ def __len__(self):
194
+ return len(self.data)
195
+
196
+ def __iter__(self):
197
+ yield from self.data
198
+
199
+ iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30)))
200
+ dataloader = DataLoader(iterable_dataset, batch_size=4)
201
+
202
+ accelerator = Accelerator()
203
+ prepared_dataloader = accelerator.prepare(dataloader)
204
+
205
+ assert isinstance(prepared_dataloader, DataLoaderDispatcher)
206
+
207
+ if accelerator.is_main_process:
208
+ logger = logging.root.manager.loggerDict["accelerate.accelerator"]
209
+ list_handler = ListHandler()
210
+ logger.addHandler(list_handler)
211
+
212
+ batches_for_metrics = []
213
+ for batch in prepared_dataloader:
214
+ batches_for_metrics.append(accelerator.gather_for_metrics(batch))
215
+
216
+ assert torch.cat(batches_for_metrics).size(0) == 30
217
+
218
+ if accelerator.is_main_process:
219
+ assert len(list_handler.logs) == 0
220
+
221
+ logger.removeHandler(list_handler)
222
+
223
+
224
+ def test_gather_for_metrics_drop_last():
225
+ accelerator = Accelerator()
226
+ per_device_batch_size = 5
227
+ num_items = (10 * accelerator.num_processes) + 1
228
+ dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True)
229
+ dataloader = accelerator.prepare(dataloader)
230
+
231
+ iterator = iter(dataloader)
232
+ next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0')
233
+ batch = next(iterator)
234
+ gathered_items = accelerator.gather_for_metrics(batch)
235
+
236
+ # Should return a full set of complete batches from each GPU
237
+ num_expected_items = per_device_batch_size * accelerator.num_processes
238
+ assert gathered_items.size(0) == (
239
+ num_expected_items
240
+ ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
241
+
242
+
243
+ def main():
244
+ dataloader_config = DataLoaderConfiguration(split_batches=False, dispatch_batches=False)
245
+ accelerator = Accelerator(dataloader_config=dataloader_config)
246
+ if accelerator.is_local_main_process:
247
+ datasets.utils.logging.set_verbosity_warning()
248
+ transformers.utils.logging.set_verbosity_warning()
249
+ else:
250
+ datasets.utils.logging.set_verbosity_error()
251
+ transformers.utils.logging.set_verbosity_error()
252
+ # TorchXLA does not support batch dispatching. 'put_on_device' is always False for
253
+ # TorchXLA, which can cause a value error in 'prepare_data_loader' function.
254
+ dispatch_batches_options = [False] if accelerator.state.distributed_type == DistributedType.XLA else [True, False]
255
+
256
+ # Temporarily close this test for TorchXLA due to the 'Cannot set version_counter for
257
+ # inference tensor' error in inference mode. Reopen it after TorchXLA fixes this bug.
258
+ # These are a bit slower so they should only be ran on the GPU or TPU
259
+ if accelerator.device.type != "cpu" and not is_torch_xla_available():
260
+ if accelerator.is_local_main_process:
261
+ print("**Testing gather_for_metrics**")
262
+ for split_batches in [True, False]:
263
+ for dispatch_batches in dispatch_batches_options:
264
+ if accelerator.is_local_main_process:
265
+ print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
266
+ test_mrpc(dispatch_batches, split_batches)
267
+ accelerator.state._reset_state()
268
+ print("test_gather_for_metrics_with_iterable_dataset")
269
+ test_gather_for_metrics_with_iterable_dataset()
270
+ print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
271
+ test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
272
+
273
+ # MpDeviceLoader in TorchXLA is an asynchronous loader that preloads several batches into cache.
274
+ # This can cause the 'end_of_dataloader' of DataLoaderStateMixin to be set earlier than intended.
275
+ # Skip this test when TorchXLA is enabled.
276
+ if accelerator.state.distributed_type != DistributedType.XLA:
277
+ if accelerator.is_local_main_process:
278
+ print("**Test torch metrics**")
279
+ for split_batches in [True, False]:
280
+ for dispatch_batches in dispatch_batches_options:
281
+ dataloader_config = DataLoaderConfiguration(
282
+ split_batches=split_batches, dispatch_batches=dispatch_batches
283
+ )
284
+ accelerator = Accelerator(dataloader_config=dataloader_config)
285
+ if accelerator.is_local_main_process:
286
+ print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
287
+ test_torch_metrics(accelerator, 99)
288
+ accelerator.state._reset_state()
289
+ if accelerator.is_local_main_process:
290
+ print("**Test last batch is not dropped when perfectly divisible**")
291
+ accelerator = Accelerator()
292
+ test_torch_metrics(accelerator, 512)
293
+ accelerator.state._reset_state()
294
+ if accelerator.is_local_main_process:
295
+ print("**Test that `drop_last` is taken into account**")
296
+ test_gather_for_metrics_drop_last()
297
+ accelerator.state._reset_state()
298
+
299
+
300
+ def _mp_fn(index):
301
+ # For xla_spawn (TPUs)
302
+ main()
303
+
304
+
305
+ if __name__ == "__main__":
306
+ main()
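The core behaviour this script exercises is `gather_for_metrics()` dropping the duplicated samples that pad the last uneven batch. A minimal standalone sketch of that pattern, using the regression helpers exported in this commit's `test_utils` package and assuming the dataset yields dicts with an "x" key (dataset length and batch size are illustrative):

import torch
from torch.utils.data import DataLoader

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator()
model = RegressionModel()
dataloader = DataLoader(RegressionDataset(length=82), batch_size=16)  # 82 does not divide evenly
model, dataloader = accelerator.prepare(model, dataloader)

predictions = []
for batch in dataloader:
    with torch.no_grad():
        preds = model(batch["x"])
    predictions.append(accelerator.gather_for_metrics(preds))
# Every sample is counted exactly once across processes, with no padding duplicates.
assert torch.cat(predictions).shape[0] == 82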
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py ADDED
@@ -0,0 +1,282 @@
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import gc
+import json
+import os
+
+import torch
+from datasets import load_dataset
+from torch.optim import AdamW
+from torch.utils.data import DataLoader
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
+
+from accelerate import Accelerator, DistributedType
+from accelerate.utils import is_mlu_available, is_npu_available, is_xpu_available
+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
+
+
+MAX_GPU_BATCH_SIZE = 16
+EVAL_BATCH_SIZE = 32
+
+
+# Converting Bytes to Megabytes
+def b2mb(x):
+    return int(x / 2**20)
+
+
+# This context manager is used to track the peak memory usage of the process
+class TorchTracemalloc:
+    def __enter__(self):
+        gc.collect()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
+            self.begin = torch.cuda.memory_allocated()
+        elif is_mlu_available():
+            torch.mlu.empty_cache()
+            torch.mlu.reset_max_memory_allocated()  # reset the peak gauge to zero
+            self.begin = torch.mlu.memory_allocated()
+        elif is_npu_available():
+            torch.npu.empty_cache()
+            torch.npu.reset_max_memory_allocated()  # reset the peak gauge to zero
+            self.begin = torch.npu.memory_allocated()
+        elif is_xpu_available():
+            torch.xpu.empty_cache()
+            torch.xpu.reset_max_memory_allocated()  # reset the peak gauge to zero
+            self.begin = torch.xpu.memory_allocated()
+        return self
+
+    def __exit__(self, *exc):
+        gc.collect()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            self.end = torch.cuda.memory_allocated()
+            self.peak = torch.cuda.max_memory_allocated()
+        elif is_mlu_available():
+            torch.mlu.empty_cache()
+            self.end = torch.mlu.memory_allocated()
+            self.peak = torch.mlu.max_memory_allocated()
+        elif is_npu_available():
+            torch.npu.empty_cache()
+            self.end = torch.npu.memory_allocated()
+            self.peak = torch.npu.max_memory_allocated()
+        elif is_xpu_available():
+            torch.xpu.empty_cache()
+            self.end = torch.xpu.memory_allocated()
+            self.peak = torch.xpu.max_memory_allocated()
+        self.used = b2mb(self.end - self.begin)
+        self.peaked = b2mb(self.peak - self.begin)
+        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
+
+
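+# A minimal usage sketch of the context manager above (illustrative only; `run_workload` is a
+# placeholder for any training or evaluation loop):
+#
+#     with TorchTracemalloc() as tracemalloc:
+#         run_workload()
+#     print(tracemalloc.used, tracemalloc.peaked)  # MB deltas relative to entering the block
+
+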
+def get_dataloaders(
+    accelerator: Accelerator,
+    batch_size: int = 16,
+    model_name: str = "bert-base-cased",
+    n_train: int = 320,
+    n_val: int = 160,
+):
+    """
+    Creates a set of `DataLoader`s for the `glue` dataset.
+
+    Args:
+        accelerator (`Accelerator`):
+            An `Accelerator` object
+        batch_size (`int`, *optional*):
+            The batch size for the train and validation DataLoaders.
+        model_name (`str`, *optional*):
+            The name of the model to use.
+        n_train (`int`, *optional*):
+            The number of training examples to use.
+        n_val (`int`, *optional*):
+            The number of validation examples to use.
+    """
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    datasets = load_dataset(
+        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
+    )
+
+    def tokenize_function(examples):
+        # max_length=None => use the model max length (it's actually the default)
+        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
+        return outputs
+
+    # Apply the method we just defined to all the examples in all the splits of the dataset
+    tokenized_datasets = datasets.map(
+        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
+    )
+
+    # We also rename the 'label' column to 'labels', which is the name the models of the
+    # transformers library expect for the labels
+    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
+
+    def collate_fn(examples):
+        # On TPU it's best to pad everything to the same length or training will be very slow.
+        if accelerator.distributed_type == DistributedType.XLA:
+            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+
+    # Instantiate dataloaders.
+    train_dataloader = DataLoader(
+        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
+    )
+    eval_dataloader = DataLoader(
+        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
+    )
+
+    return train_dataloader, eval_dataloader
+
+
+def training_function(config, args):
+    # Initialize accelerator
+    accelerator = Accelerator()
+
+    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
+    lr = config["lr"]
+    num_epochs = int(config["num_epochs"])
+    seed = int(config["seed"])
+    batch_size = int(config["batch_size"])
+    model_name = args.model_name_or_path
+
+    set_seed(seed)
+    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
+
+    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
+    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
+
+    # Instantiate optimizer
+    optimizer_cls = (
+        AdamW
+        if accelerator.state.deepspeed_plugin is None
+        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
+        else DummyOptim
+    )
+    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
+
+    if accelerator.state.deepspeed_plugin is not None:
+        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
+            "gradient_accumulation_steps"
+        ]
+    else:
+        gradient_accumulation_steps = 1
+    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
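+    # For example, with the defaults below (n_train=320, batch_size=16, num_epochs=1 and no DeepSpeed
+    # gradient accumulation) this is 320 / 16 * 1 = 20 optimizer steps; note the dataloader length is
+    # taken before `accelerator.prepare` shards it across processes.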
+
+    # Instantiate scheduler
+    if (
+        accelerator.state.deepspeed_plugin is None
+        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
+    ):
+        lr_scheduler = get_linear_schedule_with_warmup(
+            optimizer=optimizer,
+            num_warmup_steps=0,
+            num_training_steps=max_training_steps,
+        )
+    else:
+        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
+
+    # Prepare everything
+    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
+    # prepare method.
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+
+    # We need to keep track of how many total steps we have iterated over
+    overall_step = 0
+    # We also need to keep track of the starting epoch so files are named properly
+    starting_epoch = 0
+
+    # Now we train the model
+    train_total_peak_memory = {}
+    for epoch in range(starting_epoch, num_epochs):
+        with TorchTracemalloc() as tracemalloc:
+            model.train()
+            for step, batch in enumerate(train_dataloader):
+                outputs = model(**batch)
+                loss = outputs.loss
+                loss = loss / gradient_accumulation_steps
+                accelerator.backward(loss)
+                if step % gradient_accumulation_steps == 0:
+                    optimizer.step()
+                    lr_scheduler.step()
+                    optimizer.zero_grad()
+
+                overall_step += 1
+
+        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
+        accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
+        accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
+        accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
+        accelerator.print(
+            f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
+        )
+        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
+        if args.peak_memory_upper_bound is not None:
+            assert (
+                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
+            ), "Peak memory usage exceeded the upper bound"
+
+    accelerator.wait_for_everyone()
+    if accelerator.is_main_process:
+        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
+            json.dump(train_total_peak_memory, f)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
+    parser.add_argument(
+        "--model_name_or_path",
+        type=str,
+        default="bert-base-cased",
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+        required=False,
+    )
+    parser.add_argument(
+        "--output_dir",
+        type=str,
+        default=".",
+        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
+    )
+    parser.add_argument(
+        "--peak_memory_upper_bound",
+        type=float,
+        default=None,
+        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
+    )
+    parser.add_argument(
+        "--n_train",
+        type=int,
+        default=320,
+        help="Number of training examples to use.",
+    )
+    parser.add_argument(
+        "--n_val",
+        type=int,
+        default=160,
+        help="Number of validation examples to use.",
+    )
+    parser.add_argument(
+        "--num_epochs",
+        type=int,
+        default=1,
+        help="Number of train epochs.",
+    )
+    args = parser.parse_args()
+    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
+    training_function(config, args)
+
+
+if __name__ == "__main__":
+    main()
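+# Illustrative invocation (the config file and memory bound are whatever the caller provides):
+#   accelerate launch --config_file <config.yaml> test_peak_memory_usage.py --peak_memory_upper_bound 1500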
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py ADDED
@@ -0,0 +1,243 @@
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import json
+import os
+
+import evaluate
+import torch
+from datasets import load_dataset
+from torch.optim import AdamW
+from torch.utils.data import DataLoader
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
+
+from accelerate import Accelerator, DistributedType
+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
+
+
+MAX_GPU_BATCH_SIZE = 16
+EVAL_BATCH_SIZE = 32
+
+
+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
+    """
+    Creates a set of `DataLoader`s for the `glue` dataset.
+
+    Args:
+        accelerator (`Accelerator`):
+            An `Accelerator` object
+        batch_size (`int`, *optional*):
+            The batch size for the train and validation DataLoaders.
+        model_name (`str`, *optional*):
+            The name of the model to use.
+    """
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    datasets = load_dataset("glue", "mrpc")
+
+    def tokenize_function(examples):
+        # max_length=None => use the model max length (it's actually the default)
+        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
+        return outputs
+
+    # Apply the method we just defined to all the examples in all the splits of the dataset
+    tokenized_datasets = datasets.map(
+        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
+    )
+
+    # We also rename the 'label' column to 'labels', which is the name the models of the
+    # transformers library expect for the labels
+    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
+
+    def collate_fn(examples):
+        # On TPU it's best to pad everything to the same length or training will be very slow.
+        if accelerator.distributed_type == DistributedType.XLA:
+            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+
+    # Instantiate dataloaders.
+    train_dataloader = DataLoader(
+        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
+    )
+    eval_dataloader = DataLoader(
+        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
+    )
+
+    return train_dataloader, eval_dataloader
+
+
+def training_function(config, args):
+    # Initialize accelerator
+    accelerator = Accelerator()
+
+    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
+    lr = config["lr"]
+    num_epochs = int(config["num_epochs"])
+    seed = int(config["seed"])
+    batch_size = int(config["batch_size"])
+    model_name = args.model_name_or_path
+
+    set_seed(seed)
+    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
+
+    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
+    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
+
+    # Instantiate optimizer
+    optimizer_cls = (
+        AdamW
+        if accelerator.state.deepspeed_plugin is None
+        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
+        else DummyOptim
+    )
+    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
+
+    max_training_steps = len(train_dataloader) * num_epochs
+
+    # Instantiate scheduler
+    linear_decay_scheduler = False
+    if (
+        accelerator.state.deepspeed_plugin is None
+        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
+    ):
+        lr_scheduler = get_linear_schedule_with_warmup(
+            optimizer=optimizer,
+            num_warmup_steps=0,
+            num_training_steps=max_training_steps,
+        )
+        linear_decay_scheduler = True
+    else:
+        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
+
+    # Prepare everything
+    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
+    # prepare method.
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+
+    # We also need to keep track of the starting epoch so files are named properly
+    starting_epoch = 0
+
+    # Now we train the model
+    metric = evaluate.load("glue", "mrpc")
+    best_performance = 0
+    performance_metric = {}
+    expected_lr_after_first_optim_step = lr * (
+        1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps)
+    )
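+    # The prepared scheduler advances the underlying schedule num_processes * gradient_accumulation_steps
+    # times per optimizer step, so after the first optimizer step the linearly decaying LR is expected to be
+    # lr * (1 - num_processes * gradient_accumulation_steps / max_training_steps), i.e. the value computed above.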
+    lr_scheduler_check_completed = False
+    for epoch in range(starting_epoch, num_epochs):
+        model.train()
+        for step, batch in enumerate(train_dataloader):
+            with accelerator.accumulate(model):
+                outputs = model(**batch)
+                loss = outputs.loss
+                accelerator.backward(loss)
+                optimizer.step()
+                lr_scheduler.step()
+                optimizer.zero_grad()
+
+            # assert the learning rate after first optimizer step
+            if (
+                accelerator.sync_gradients
+                and not lr_scheduler_check_completed
+                and linear_decay_scheduler
+                and accelerator.state.mixed_precision == "no"
+            ):
+                assert (
+                    lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step
+                ), f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got {lr_scheduler.get_last_lr()[0]}"
+                lr_scheduler_check_completed = True
+
+        model.eval()
+        samples_seen = 0
+        for step, batch in enumerate(eval_dataloader):
+            # We could avoid this line since we set the accelerator with `device_placement=True`.
+            batch.to(accelerator.device)
+            with torch.no_grad():
+                outputs = model(**batch)
+            predictions = outputs.logits.argmax(dim=-1)
+            # It is slightly faster to call this once, than multiple times
+            predictions, references = accelerator.gather(
+                (predictions, batch["labels"])
+            )  # If we are in a multiprocess environment, the last batch has duplicates
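+            # The prepared eval dataloader repeats samples so that every process receives full batches;
+            # on the last batch we therefore truncate to the true dataset length so the metric matches a
+            # single-process run.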
+            if accelerator.use_distributed:
+                if step == len(eval_dataloader) - 1:
+                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+                    references = references[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += references.shape[0]
+            metric.add_batch(
+                predictions=predictions,
+                references=references,
+            )
+
+        eval_metric = metric.compute()
+        # Use accelerator.print to print only on the main process.
+        accelerator.print(f"epoch {epoch}:", eval_metric)
+        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
+
+        if best_performance < eval_metric["accuracy"]:
+            best_performance = eval_metric["accuracy"]
+
+    # check that the LR is 0
+    if linear_decay_scheduler and accelerator.state.mixed_precision == "no":
+        assert (
+            lr_scheduler.get_last_lr()[0] == 0
+        ), f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}"
+
+    if args.performance_lower_bound is not None:
+        assert (
+            args.performance_lower_bound <= best_performance
+        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
+
+    accelerator.wait_for_everyone()
+    if accelerator.is_main_process:
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump(performance_metric, f)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Simple example of a training script that tracks model performance on the GLUE MRPC task."
+    )
+    parser.add_argument(
+        "--model_name_or_path",
+        type=str,
+        default="bert-base-cased",
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+        required=False,
+    )
+    parser.add_argument(
+        "--output_dir",
+        type=str,
+        default=".",
+        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
+    )
+    parser.add_argument(
+        "--performance_lower_bound",
+        type=float,
+        default=None,
+        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
+    )
+    parser.add_argument(
+        "--num_epochs",
+        type=int,
+        default=3,
+        help="Number of train epochs.",
+    )
+    args = parser.parse_args()
+    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
+    training_function(config, args)
+
+
+if __name__ == "__main__":
+    main()
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py ADDED
@@ -0,0 +1,129 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+from torchvision.models import resnet34
+from transformers import (
+    BertConfig,
+    BertForMaskedLM,
+    GPT2Config,
+    GPT2ForSequenceClassification,
+    T5Config,
+    T5ForConditionalGeneration,
+)
+
+from accelerate import PartialState
+from accelerate.inference import prepare_pippy
+from accelerate.utils import DistributedType, send_to_device, set_seed
+
+
+model_to_config = {
+    "t5": (T5ForConditionalGeneration, T5Config, 1024),
+    "bert": (BertForMaskedLM, BertConfig, 512),
+    "gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024),
+}
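+# Each tuple above pairs a model class with its config class and the sequence length used for the dummy inputs.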
+
+
+def get_model_and_data_for_text(model_name, device, num_processes: int = 2):
+    initializer, config, seq_len = model_to_config[model_name]
+    config_args = {}
+    # Eventually needed for batch inference tests on gpt-2 when bs != 1
+    # if model_name == "gpt2":
+    #     config_args["pad_token_id"] = 0
+    model_config = config(**config_args)
+    model = initializer(model_config)
+    return model, torch.randint(
+        low=0,
+        high=model_config.vocab_size,
+        size=(num_processes, seq_len),
+        device=device,
+        dtype=torch.int64,
+        requires_grad=False,
+    )
+
+
+def test_gpt2(batch_size: int = 2):
+    set_seed(42)
+    state = PartialState()
+    model, inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size)
+    model = prepare_pippy(model, example_args=(inputs,), no_split_module_classes=model._no_split_modules)
+    # For inference, args need to be a tuple
+    inputs = inputs.to("cuda")
+    with torch.no_grad():
+        output = model(inputs)
+    # Check that we only grab the real outputs at the end, on the last process
+    if not state.is_last_process:
+        assert output is None, "Output should only be generated on the last process!"
+    else:
+        assert output is not None, "Output was not generated on the last process!"
+
+
+def test_t5(batch_size: int = 2):
+    set_seed(42)
+    state = PartialState()
+    model, inputs = get_model_and_data_for_text("t5", "cpu", batch_size)
+    example_inputs = {"input_ids": inputs, "decoder_input_ids": inputs}
+    model = prepare_pippy(
+        model,
+        no_split_module_classes=model._no_split_modules,
+        example_kwargs=example_inputs,
+    )
+    # For inference, args need to be a tuple
+    inputs = send_to_device(example_inputs, "cuda:0")
+    with torch.no_grad():
+        output = model(*inputs.values())
+    # Check that we only grab the real outputs at the end, on the last process
+    if not state.is_last_process:
+        assert output is None, "Output should only be generated on the last process!"
+    else:
+        assert output is not None, "Output was not generated on the last process!"
+
+
+def test_resnet(batch_size: int = 2):
+    set_seed(42)
+    state = PartialState()
+    model = resnet34()
+    input_tensor = torch.rand(batch_size, 3, 224, 224)
+    model = prepare_pippy(
+        model,
+        example_args=(input_tensor,),
+    )
+    inputs = send_to_device(input_tensor, "cuda:0")
+    with torch.no_grad():
+        output = model(inputs)
+    # Check that we only grab the real outputs at the end, on the last process
+    if not state.is_last_process:
+        assert output is None, "Output should only be generated on the last process!"
+    else:
+        assert output is not None, "Output was not generated on the last process!"
+
+
+if __name__ == "__main__":
+    state = PartialState()
+    state.print("Testing pippy integration...")
+    if state.distributed_type == DistributedType.MULTI_GPU:
+        state.print("Testing GPT2...")
+        test_gpt2()
+        # Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue
+        # due to references
+        # NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope
+        # test_gpt2(3)
+        state.print("Testing T5...")
+        test_t5()
+        test_t5(1)
+        test_t5(3)
+        state.print("Testing CV model...")
+        test_resnet()
+        test_resnet(3)
+    else:
+        print("Less than two GPUs found, not running tests!")
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py ADDED
@@ -0,0 +1,52 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch.distributed
+
+from accelerate.test_utils import require_huggingface_suite
+from accelerate.utils import is_transformers_available
+
+
+if is_transformers_available():
+    from transformers import AutoModel, TrainingArguments
+
+
+GPT2_TINY = "sshleifer/tiny-gpt2"
+
+
+@require_huggingface_suite
+def init_torch_dist_then_launch_deepspeed():
+    torch.distributed.init_process_group(backend="nccl")
+    deepspeed_config = {
+        "zero_optimization": {
+            "stage": 3,
+        },
+        "train_batch_size": "auto",
+        "train_micro_batch_size_per_gpu": "auto",
+    }
+    train_args = TrainingArguments(
+        output_dir="./",
+        deepspeed=deepspeed_config,
+    )
+    model = AutoModel.from_pretrained(GPT2_TINY)
+    assert train_args is not None
+    assert model is not None
+
+
+def main():
+    init_torch_dist_then_launch_deepspeed()
+
+
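+# Note: `init_process_group(backend="nccl")` relies on the standard distributed environment variables
+# (RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT), so this script is meant to be run under a launcher,
+# e.g. `torchrun --nproc_per_node=2 test_zero3_integration.py` (illustrative invocation).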
+if __name__ == "__main__":
+    main()
venv/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py ADDED
@@ -0,0 +1,26 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+
+
+def main():
+    if torch.cuda.is_available():
+        num_gpus = torch.cuda.device_count()
+    else:
+        num_gpus = 0
+    print(f"Successfully ran on {num_gpus} GPUs")
+
+
+if __name__ == "__main__":
+    main()