applied-ai-018 committed
Commit 1d06125 · verified · 1 Parent(s): 1340f33

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__init__.py +52 -0
  10. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/cluster.py +717 -0
  19. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config.py +89 -0
  20. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_args.py +243 -0
  21. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py +101 -0
  22. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/default.py +133 -0
  23. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/update.py +63 -0
  24. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__init__.py +51 -0
  25. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/examples.py +146 -0
  30. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py +13 -0
  31. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py +13 -0
  39. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py +268 -0
  47. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py +306 -0
  48. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py +282 -0
  49. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py +243 -0
  50. llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py +129 -0
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc ADDED
Binary file (28.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc ADDED
Binary file (1.66 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc ADDED
Binary file (3.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.71 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__init__.py ADDED
@@ -0,0 +1,52 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from .config import config_command_parser
20
+ from .config_args import default_config_file, load_config_from_file # noqa: F401
21
+ from .default import default_command_parser
22
+ from .update import update_command_parser
23
+
24
+
25
+ def get_config_parser(subparsers=None):
26
+ parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
27
+ # The main config parser
28
+ config_parser = config_command_parser(subparsers)
29
+ # The subparser to add commands to
30
+ subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
31
+
32
+ # Then add other parsers with the parent parser
33
+ default_command_parser(subcommands, parents=[parent_parser])
34
+ update_command_parser(subcommands, parents=[parent_parser])
35
+
36
+ return config_parser
37
+
38
+
39
+ def main():
40
+ config_parser = get_config_parser()
41
+ args = config_parser.parse_args()
42
+
43
+ if not hasattr(args, "func"):
44
+ config_parser.print_help()
45
+ exit(1)
46
+
47
+ # Run
48
+ args.func(args)
49
+
50
+
51
+ if __name__ == "__main__":
52
+ main()
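
The file above wires the `accelerate config` command tree together: `config_command_parser` builds the main parser, and the `default` and `update` subcommands register their own `func` callbacks. As a reading aid (my sketch, not part of the uploaded files), this is roughly how the module is exercised once the `accelerate` package is importable:

from accelerate.commands.config import get_config_parser

parser = get_config_parser()
# Without a subcommand, args has no `func`, so main() prints help and exits with code 1.
args = parser.parse_args(["default", "--mixed_precision", "bf16"])
args.func(args)  # dispatches to the `default` subcommand, which writes a basic config file
                 # (by default into the Hugging Face cache location)
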
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.45 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc ADDED
Binary file (7.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc ADDED
Binary file (2.76 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc ADDED
Binary file (3.93 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc ADDED
Binary file (6.88 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/cluster.py ADDED
@@ -0,0 +1,717 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import os
18
+
19
+ from ...utils import (
20
+ ComputeEnvironment,
21
+ DistributedType,
22
+ is_deepspeed_available,
23
+ is_mlu_available,
24
+ is_mps_available,
25
+ is_npu_available,
26
+ is_transformers_available,
27
+ is_xpu_available,
28
+ )
29
+ from ...utils.constants import (
30
+ DEEPSPEED_MULTINODE_LAUNCHERS,
31
+ FSDP_AUTO_WRAP_POLICY,
32
+ FSDP_BACKWARD_PREFETCH,
33
+ FSDP_SHARDING_STRATEGY,
34
+ FSDP_STATE_DICT_TYPE,
35
+ TORCH_DYNAMO_MODES,
36
+ )
37
+ from .config_args import ClusterConfig
38
+ from .config_utils import (
39
+ DYNAMO_BACKENDS,
40
+ _ask_field,
41
+ _ask_options,
42
+ _convert_distributed_mode,
43
+ _convert_dynamo_backend,
44
+ _convert_mixed_precision,
45
+ _convert_yes_no_to_bool,
46
+ )
47
+
48
+
49
+ def get_cluster_input():
50
+ distributed_type = _ask_options(
51
+ "Which type of machine are you using?",
52
+ ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "TPU"],
53
+ _convert_distributed_mode,
54
+ )
55
+
56
+ machine_rank = 0
57
+ num_machines = 1
58
+ num_processes = 1
59
+ gpu_ids = None
60
+ main_process_ip = None
61
+ main_process_port = None
62
+ rdzv_backend = "static"
63
+ same_network = True
64
+ debug = False
65
+
66
+ if distributed_type in [
67
+ DistributedType.MULTI_GPU,
68
+ DistributedType.MULTI_MLU,
69
+ DistributedType.MULTI_NPU,
70
+ DistributedType.MULTI_XPU,
71
+ DistributedType.MULTI_CPU,
72
+ ]:
73
+ num_machines = _ask_field(
74
+ "How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
75
+ int,
76
+ default=1,
77
+ )
78
+ if num_machines > 1:
79
+ machine_rank = _ask_options(
80
+ "What is the rank of this machine?",
81
+ list(range(num_machines)),
82
+ int,
83
+ )
84
+ main_process_ip = _ask_field(
85
+ "What is the IP address of the machine that will host the main process? ",
86
+ )
87
+ main_process_port = _ask_field(
88
+ "What is the port you will use to communicate with the main process? ",
89
+ int,
90
+ )
91
+ same_network = _ask_field(
92
+ "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ",
93
+ _convert_yes_no_to_bool,
94
+ default=True,
95
+ error_message="Please enter yes or no.",
96
+ )
97
+ if not same_network:
98
+ rdzv_backend = _ask_field(
99
+ "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static"
100
+ )
101
+ debug = _ask_field(
102
+ "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
103
+ _convert_yes_no_to_bool,
104
+ default=False,
105
+ error_message="Please enter yes or no.",
106
+ )
107
+
108
+ if distributed_type == DistributedType.NO:
109
+ use_cpu = _ask_field(
110
+ "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
111
+ _convert_yes_no_to_bool,
112
+ default=False,
113
+ error_message="Please enter yes or no.",
114
+ )
115
+ elif distributed_type == DistributedType.MULTI_CPU:
116
+ use_cpu = True
117
+ else:
118
+ use_cpu = False
119
+
120
+ ipex_config = {}
121
+ mpirun_config = {}
122
+ if use_cpu:
123
+ ipex_config["ipex"] = _ask_field(
124
+ "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
125
+ _convert_yes_no_to_bool,
126
+ default=False,
127
+ error_message="Please enter yes or no.",
128
+ )
129
+ if distributed_type == DistributedType.MULTI_CPU:
130
+ use_mpirun = _ask_field(
131
+ "Do you want accelerate to launch mpirun? [yes/NO]: ",
132
+ _convert_yes_no_to_bool,
133
+ default=False,
134
+ error_message="Please enter yes or no.",
135
+ )
136
+ if use_mpirun:
137
+ mpirun_hostfile = _ask_field(
138
+ "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
139
+ str,
140
+ default="~/hostfile",
141
+ )
142
+ mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
143
+ mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
144
+ if (
145
+ not use_cpu
146
+ and is_xpu_available()
147
+ and distributed_type
148
+ not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA]
149
+ ):
150
+ ipex_config["use_xpu"] = _ask_field(
151
+ "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
152
+ _convert_yes_no_to_bool,
153
+ default=False,
154
+ error_message="Please enter yes or no.",
155
+ )
156
+
157
+ dynamo_config = {}
158
+ use_dynamo = _ask_field(
159
+ "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
160
+ _convert_yes_no_to_bool,
161
+ default=False,
162
+ error_message="Please enter yes or no.",
163
+ )
164
+ if use_dynamo:
165
+ prefix = "dynamo_"
166
+ dynamo_config[prefix + "backend"] = _ask_options(
167
+ "Which dynamo backend would you like to use?",
168
+ [x.lower() for x in DYNAMO_BACKENDS],
169
+ _convert_dynamo_backend,
170
+ default=2,
171
+ )
172
+ use_custom_options = _ask_field(
173
+ "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
174
+ _convert_yes_no_to_bool,
175
+ default=False,
176
+ error_message="Please enter yes or no.",
177
+ )
178
+
179
+ if use_custom_options:
180
+ dynamo_config[prefix + "mode"] = _ask_options(
181
+ "Which mode do you want to use?",
182
+ TORCH_DYNAMO_MODES,
183
+ lambda x: TORCH_DYNAMO_MODES[int(x)],
184
+ default=0,
185
+ )
186
+ dynamo_config[prefix + "use_fullgraph"] = _ask_field(
187
+ "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
188
+ _convert_yes_no_to_bool,
189
+ default=False,
190
+ error_message="Please enter yes or no.",
191
+ )
192
+ dynamo_config[prefix + "use_dynamic"] = _ask_field(
193
+ "Do you want to enable dynamic shape tracing? [yes/NO]: ",
194
+ _convert_yes_no_to_bool,
195
+ default=False,
196
+ error_message="Please enter yes or no.",
197
+ )
198
+
199
+ use_mps = not use_cpu and is_mps_available()
200
+ deepspeed_config = {}
201
+ if (
202
+ distributed_type
203
+ in [
204
+ DistributedType.MULTI_GPU,
205
+ DistributedType.MULTI_XPU,
206
+ DistributedType.MULTI_NPU,
207
+ DistributedType.MULTI_MLU,
208
+ DistributedType.NO,
209
+ ]
210
+ and not use_mps
211
+ ):
212
+ use_deepspeed = _ask_field(
213
+ "Do you want to use DeepSpeed? [yes/NO]: ",
214
+ _convert_yes_no_to_bool,
215
+ default=False,
216
+ error_message="Please enter yes or no.",
217
+ )
218
+ if use_deepspeed:
219
+ distributed_type = DistributedType.DEEPSPEED
220
+ assert (
221
+ is_deepspeed_available()
222
+ ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
223
+
224
+ if distributed_type == DistributedType.DEEPSPEED:
225
+ use_deepspeed_config = _ask_field(
226
+ "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
227
+ _convert_yes_no_to_bool,
228
+ default=False,
229
+ error_message="Please enter yes or no.",
230
+ )
231
+ if use_deepspeed_config:
232
+ deepspeed_config["deepspeed_config_file"] = _ask_field(
233
+ "Please enter the path to the json DeepSpeed config file: ",
234
+ str,
235
+ default="none",
236
+ )
237
+ else:
238
+ deepspeed_config["zero_stage"] = _ask_options(
239
+ "What should be your DeepSpeed's ZeRO optimization stage?",
240
+ [0, 1, 2, 3],
241
+ int,
242
+ default=2,
243
+ )
244
+
245
+ deepspeed_devices = ["none", "cpu", "nvme"]
246
+ if deepspeed_config["zero_stage"] >= 2:
247
+ deepspeed_config["offload_optimizer_device"] = _ask_options(
248
+ "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
249
+ )
250
+ deepspeed_config["offload_param_device"] = _ask_options(
251
+ "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
252
+ )
253
+ if deepspeed_config["offload_param_device"] == "nvme":
254
+ deepspeed_config["offload_param_nvme_path"] = _ask_field(
255
+ "Nvme Path to offload parameters?",
256
+ str,
257
+ default="/nvme",
258
+ )
259
+ if deepspeed_config["offload_optimizer_device"] == "nvme":
260
+ deepspeed_config["offload_optimizer_nvme_path"] = _ask_field(
261
+ "Nvme Path to offload optimizer states?",
262
+ str,
263
+ default="/nvme",
264
+ )
265
+ deepspeed_config["gradient_accumulation_steps"] = _ask_field(
266
+ "How many gradient accumulation steps you're passing in your script? [1]: ",
267
+ int,
268
+ default=1,
269
+ )
270
+ use_gradient_clipping = _ask_field(
271
+ "Do you want to use gradient clipping? [yes/NO]: ",
272
+ _convert_yes_no_to_bool,
273
+ default=False,
274
+ error_message="Please enter yes or no.",
275
+ )
276
+ if use_gradient_clipping:
277
+ deepspeed_config["gradient_clipping"] = _ask_field(
278
+ "What is the gradient clipping value? [1.0]: ",
279
+ float,
280
+ default=1.0,
281
+ )
282
+ if deepspeed_config["zero_stage"] == 3:
283
+ deepspeed_config["zero3_save_16bit_model"] = _ask_field(
284
+ "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ",
285
+ _convert_yes_no_to_bool,
286
+ default=False,
287
+ error_message="Please enter yes or no.",
288
+ )
289
+ deepspeed_config["zero3_init_flag"] = _ask_field(
290
+ "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ",
291
+ _convert_yes_no_to_bool,
292
+ default=False,
293
+ error_message="Please enter yes or no.",
294
+ )
295
+ if deepspeed_config["zero3_init_flag"]:
296
+ if not is_transformers_available():
297
+ raise Exception(
298
+ "When `zero3_init_flag` is set, it requires Transformers to be installed. "
299
+ "Please run `pip3 install transformers`."
300
+ )
301
+ use_moe = _ask_field(
302
+ "Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ",
303
+ _convert_yes_no_to_bool,
304
+ default=False,
305
+ error_message="Please enter yes or no.",
306
+ )
307
+ if use_moe:
308
+ deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field(
309
+ "Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : "
310
+ " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ",
311
+ str,
312
+ )
313
+
314
+ if num_machines > 1:
315
+ launcher_query = "Which Type of launcher do you want to use?"
316
+ deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
317
+ launcher_query,
318
+ DEEPSPEED_MULTINODE_LAUNCHERS,
319
+ lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
320
+ )
321
+
322
+ if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
323
+ deepspeed_config["deepspeed_hostfile"] = _ask_field(
324
+ "DeepSpeed configures multi-node compute resources with hostfile. "
325
+ "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; "
326
+ "for more information please refer official [documentation]"
327
+ "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). "
328
+ "Please specify the location of hostfile: ",
329
+ str,
330
+ )
331
+
332
+ is_exclusion_filter = _ask_field(
333
+ "Do you want to specify exclusion filter string? [yes/NO]: ",
334
+ _convert_yes_no_to_bool,
335
+ default=False,
336
+ error_message="Please enter yes or no.",
337
+ )
338
+ if is_exclusion_filter:
339
+ deepspeed_config["deepspeed_exclusion_filter"] = _ask_field(
340
+ "DeepSpeed exclusion filter string: ",
341
+ str,
342
+ )
343
+
344
+ is_inclusion_filter = _ask_field(
345
+ "Do you want to specify inclusion filter string? [yes/NO]: ",
346
+ _convert_yes_no_to_bool,
347
+ default=False,
348
+ error_message="Please enter yes or no.",
349
+ )
350
+ if is_inclusion_filter:
351
+ deepspeed_config["deepspeed_inclusion_filter"] = _ask_field(
352
+ "DeepSpeed inclusion filter string: ",
353
+ str,
354
+ )
355
+
356
+ fsdp_config = {}
357
+ if distributed_type in [
358
+ DistributedType.MULTI_GPU,
359
+ DistributedType.MULTI_NPU,
360
+ DistributedType.MULTI_MLU,
361
+ DistributedType.MULTI_XPU,
362
+ ]:
363
+ use_fsdp = _ask_field(
364
+ "Do you want to use FullyShardedDataParallel? [yes/NO]: ",
365
+ _convert_yes_no_to_bool,
366
+ default=False,
367
+ error_message="Please enter yes or no.",
368
+ )
369
+ if use_fsdp:
370
+ distributed_type = DistributedType.FSDP
371
+ if distributed_type == DistributedType.FSDP:
372
+ sharding_strategy_query = "What should be your sharding strategy?"
373
+ fsdp_config["fsdp_sharding_strategy"] = _ask_options(
374
+ sharding_strategy_query,
375
+ FSDP_SHARDING_STRATEGY,
376
+ lambda x: FSDP_SHARDING_STRATEGY[int(x)],
377
+ )
378
+ fsdp_config["fsdp_offload_params"] = _ask_field(
379
+ "Do you want to offload parameters and gradients to CPU? [yes/NO]: ",
380
+ _convert_yes_no_to_bool,
381
+ default=False,
382
+ error_message="Please enter yes or no.",
383
+ )
384
+ fsdp_wrap_query = "What should be your auto wrap policy?"
385
+ fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
386
+ fsdp_wrap_query,
387
+ FSDP_AUTO_WRAP_POLICY,
388
+ lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],
389
+ )
390
+ if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]:
391
+ use_no_split_modules = _ask_field(
392
+ "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ",
393
+ _convert_yes_no_to_bool,
394
+ default=False,
395
+ error_message="Please enter yes or no.",
396
+ )
397
+ if not use_no_split_modules:
398
+ fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field(
399
+ "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :"
400
+ "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ",
401
+ str,
402
+ )
403
+ elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]:
404
+ fsdp_config["fsdp_min_num_params"] = _ask_field(
405
+ "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ",
406
+ int,
407
+ default=100000000,
408
+ )
409
+ fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
410
+ fsdp_config["fsdp_backward_prefetch"] = _ask_options(
411
+ fsdp_backward_prefetch_query,
412
+ FSDP_BACKWARD_PREFETCH,
413
+ lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
414
+ )
415
+ fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
416
+ fsdp_config["fsdp_state_dict_type"] = _ask_options(
417
+ fsdp_state_dict_type_query,
418
+ FSDP_STATE_DICT_TYPE,
419
+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],
420
+ default=2,
421
+ )
422
+ fsdp_config["fsdp_forward_prefetch"] = _ask_field(
423
+ "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
424
+ _convert_yes_no_to_bool,
425
+ default=False,
426
+ error_message="Please enter yes or no.",
427
+ )
428
+ fsdp_config["fsdp_use_orig_params"] = _ask_field(
429
+ "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
430
+ _convert_yes_no_to_bool,
431
+ default=True,
432
+ error_message="Please enter yes or no.",
433
+ )
434
+ fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
435
+ "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
436
+ _convert_yes_no_to_bool,
437
+ default=True,
438
+ error_message="Please enter yes or no.",
439
+ )
440
+ if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
441
+ fsdp_config["fsdp_sync_module_states"] = True
442
+ else:
443
+ fsdp_config["fsdp_sync_module_states"] = _ask_field(
444
+ "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
445
+ _convert_yes_no_to_bool,
446
+ default=True,
447
+ error_message="Please enter yes or no.",
448
+ )
449
+
450
+ megatron_lm_config = {}
451
+ if distributed_type in [DistributedType.MULTI_GPU]:
452
+ use_megatron_lm = _ask_field(
453
+ "Do you want to use Megatron-LM ? [yes/NO]: ",
454
+ _convert_yes_no_to_bool,
455
+ default=False,
456
+ error_message="Please enter yes or no.",
457
+ )
458
+ if use_megatron_lm:
459
+ distributed_type = DistributedType.MEGATRON_LM
460
+ if distributed_type == DistributedType.MEGATRON_LM:
461
+ prefix = "megatron_lm_"
462
+ megatron_lm_config[prefix + "tp_degree"] = _ask_field(
463
+ "What is the Tensor Parallelism degree/size? [1]:",
464
+ int,
465
+ default=1,
466
+ error_message="Please enter an integer.",
467
+ )
468
+ if megatron_lm_config[prefix + "tp_degree"] > 1:
469
+ megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field(
470
+ "Do you want to enable Sequence Parallelism? [YES/no]: ",
471
+ _convert_yes_no_to_bool,
472
+ default=True,
473
+ error_message="Please enter yes or no.",
474
+ )
475
+
476
+ megatron_lm_config[prefix + "pp_degree"] = _ask_field(
477
+ "What is the Pipeline Parallelism degree/size? [1]:",
478
+ int,
479
+ default=1,
480
+ error_message="Please enter an integer.",
481
+ )
482
+ if megatron_lm_config[prefix + "pp_degree"] > 1:
483
+ megatron_lm_config[prefix + "num_micro_batches"] = _ask_field(
484
+ "What is the number of micro-batches? [1]:",
485
+ int,
486
+ default=1,
487
+ error_message="Please enter an integer.",
488
+ )
489
+
490
+ megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
491
+ "Do you want to enable selective activation recomputation? [YES/no]: ",
492
+ _convert_yes_no_to_bool,
493
+ default=True,
494
+ error_message="Please enter yes or no.",
495
+ )
496
+
497
+ megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
498
+ "Do you want to use distributed optimizer "
499
+ "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
500
+ _convert_yes_no_to_bool,
501
+ default=True,
502
+ error_message="Please enter yes or no.",
503
+ )
504
+
505
+ megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
506
+ "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
507
+ float,
508
+ default=1.0,
509
+ )
510
+ # TPU specific defaults
511
+ tpu_commands = None
512
+ tpu_command_file = None
513
+ tpu_downcast_bf16 = "no"
514
+ tpu_env = []
515
+ tpu_name = None
516
+ tpu_vm = None
517
+ tpu_zone = None
518
+ tpu_use_sudo = False
519
+ tpu_use_cluster = False
520
+
521
+ if distributed_type in [
522
+ DistributedType.MULTI_CPU,
523
+ DistributedType.MULTI_XPU,
524
+ DistributedType.MULTI_GPU,
525
+ DistributedType.MULTI_MLU,
526
+ DistributedType.MULTI_NPU,
527
+ DistributedType.XLA,
528
+ ]:
529
+ machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
530
+ if machine_type == "TPU":
531
+ machine_type += " cores"
532
+ elif machine_type == "CPU":
533
+ machine_type = "processes"
534
+ else:
535
+ machine_type += "(s)"
536
+ num_processes = _ask_field(
537
+ f"How many {machine_type} should be used for distributed training? [1]:",
538
+ int,
539
+ default=1,
540
+ error_message="Please enter an integer.",
541
+ )
542
+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
543
+ num_processes = _ask_field(
544
+ "How many GPU(s) should be used for distributed training? [1]:",
545
+ int,
546
+ default=1,
547
+ error_message="Please enter an integer.",
548
+ )
549
+ else:
550
+ num_processes = 1
551
+
552
+ if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
553
+ raise ValueError(
554
+ f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
555
+ )
556
+
557
+ if (
558
+ distributed_type
559
+ in [
560
+ DistributedType.MULTI_GPU,
561
+ DistributedType.MULTI_MLU,
562
+ DistributedType.MULTI_NPU,
563
+ DistributedType.MULTI_XPU,
564
+ DistributedType.NO,
565
+ ]
566
+ and not use_cpu
567
+ and not use_mps
568
+ ):
569
+ if is_npu_available():
570
+ machine_type = "NPU(s)"
571
+ elif is_mlu_available():
572
+ machine_type = "MLU(s)"
573
+ else:
574
+ machine_type = "GPU(s)"
575
+ gpu_ids = _ask_field(
576
+ f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
577
+ default="all",
578
+ )
579
+
580
+ # CPU affinity is only supported on NVIDIA hardware for now
581
+ enable_cpu_affinity = False
582
+ if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
583
+ enable_cpu_affinity = _ask_field(
584
+ "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
585
+ _convert_yes_no_to_bool,
586
+ default=False,
587
+ error_message="Please enter yes or no.",
588
+ )
589
+
590
+ if distributed_type == DistributedType.XLA:
591
+ mixed_precision = "no"
592
+ main_training_function = _ask_field(
593
+ "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
594
+ default="main",
595
+ )
596
+ tpu_use_cluster = _ask_field(
597
+ "Are you using a TPU cluster? [yes/NO]: ",
598
+ _convert_yes_no_to_bool,
599
+ default=False,
600
+ error_message="Please enter yes or no.",
601
+ )
602
+ if tpu_use_cluster:
603
+ tpu_name = _ask_field(
604
+ "What is the name of your TPU cluster? ",
605
+ default=None,
606
+ error_message="Please enter the name of your TPU cluster.",
607
+ )
608
+ tpu_zone = _ask_field(
609
+ "What is the zone of your TPU cluster? ",
610
+ default=None,
611
+ error_message="Please enter the zone of your TPU cluster.",
612
+ )
613
+ tpu_use_sudo = _ask_field(
614
+ "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ",
615
+ default=False,
616
+ error_message="Please enter yes or no.",
617
+ )
618
+ run_commands = _ask_field(
619
+ "Do you have code you wish to run on startup in each pod? [yes/NO]: ",
620
+ _convert_yes_no_to_bool,
621
+ default=False,
622
+ error_message="Please enter yes or no.",
623
+ )
624
+ if run_commands:
625
+ use_command_file = _ask_field(
626
+ "Is this code located in a bash script? [yes/NO]: ",
627
+ _convert_yes_no_to_bool,
628
+ default=False,
629
+ error_message="Please enter yes or no.",
630
+ )
631
+ if use_command_file:
632
+ tpu_command_file = _ask_field(
633
+ "What is the path to your bash script? ",
634
+ default=None,
635
+ error_message="Please enter the path to your bash script.",
636
+ )
637
+ tpu_command_file = os.path.abspath(tpu_command_file)
638
+ else:
639
+ print("Please enter each command seperately you wish to run on startup in each pod.")
640
+ tpu_commands = []
641
+ another_command = True
642
+ while another_command:
643
+ tpu_commands.append(
644
+ _ask_field(
645
+ "Please enter a single command to be ran ",
646
+ default=None,
647
+ error_message="Please enter the commands you wish to run on startup in each pod as a single string.",
648
+ )
649
+ )
650
+ another_command = _ask_field(
651
+ "Do you wish to add another command? [yes/NO]: ",
652
+ _convert_yes_no_to_bool,
653
+ default=False,
654
+ error_message="Please enter yes or no.",
655
+ )
656
+ tpu_vm = _ask_field(
657
+ "If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ",
658
+ default="",
659
+ ).split(",")
660
+ tpu_env = _ask_field(
661
+ "What environment variables do you wish to set in each pod, seperated by a comma: ",
662
+ default="",
663
+ ).split(",")
664
+
665
+ else:
666
+ main_training_function = "main"
667
+ if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
668
+ mixed_precision = None
669
+ else:
670
+ mixed_precision = _ask_options(
671
+ "Do you wish to use FP16 or BF16 (mixed precision)?",
672
+ ["no", "fp16", "bf16", "fp8"],
673
+ _convert_mixed_precision,
674
+ )
675
+
676
+ if use_dynamo and mixed_precision == "no" and not use_cpu:
677
+ print(
678
+ "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
679
+ )
680
+
681
+ if distributed_type == DistributedType.XLA and mixed_precision == "bf16":
682
+ tpu_downcast_bf16 = _ask_field(
683
+ "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
684
+ )
685
+
686
+ return ClusterConfig(
687
+ compute_environment=ComputeEnvironment.LOCAL_MACHINE,
688
+ distributed_type=distributed_type,
689
+ num_processes=num_processes,
690
+ gpu_ids=gpu_ids,
691
+ mixed_precision=mixed_precision,
692
+ downcast_bf16=tpu_downcast_bf16,
693
+ machine_rank=machine_rank,
694
+ num_machines=num_machines,
695
+ main_process_ip=main_process_ip,
696
+ main_process_port=main_process_port,
697
+ main_training_function=main_training_function,
698
+ deepspeed_config=deepspeed_config,
699
+ fsdp_config=fsdp_config,
700
+ megatron_lm_config=megatron_lm_config,
701
+ ipex_config=ipex_config,
702
+ mpirun_config=mpirun_config,
703
+ use_cpu=use_cpu,
704
+ rdzv_backend=rdzv_backend,
705
+ same_network=same_network,
706
+ commands=tpu_commands,
707
+ command_file=tpu_command_file,
708
+ tpu_env=tpu_env,
709
+ tpu_name=tpu_name,
710
+ tpu_vm=tpu_vm,
711
+ tpu_zone=tpu_zone,
712
+ tpu_use_sudo=tpu_use_sudo,
713
+ tpu_use_cluster=tpu_use_cluster,
714
+ dynamo_config=dynamo_config,
715
+ debug=debug,
716
+ enable_cpu_affinity=enable_cpu_affinity,
717
+ )
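
`get_cluster_input()` is the interactive questionnaire behind `accelerate config`: every `_ask_field`/`_ask_options` call above fills one field, and the answers are folded into a `ClusterConfig` that is later serialized to the default YAML file. A hedged, non-interactive stand-in for one such session (the field values are illustrative, not taken from the commit):

from accelerate.commands.config.config_args import ClusterConfig
from accelerate.utils import ComputeEnvironment, DistributedType

# Roughly what a "single node, 2 GPUs, bf16, no DeepSpeed/FSDP" answer set produces.
cfg = ClusterConfig(
    compute_environment=ComputeEnvironment.LOCAL_MACHINE,
    distributed_type=DistributedType.MULTI_GPU,
    mixed_precision="bf16",
    use_cpu=False,
    debug=False,
    num_processes=2,
    gpu_ids="all",
)
cfg.to_yaml_file("default_config.yaml")  # same writer the config command uses
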
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config.py ADDED
@@ -0,0 +1,89 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+
20
+ from accelerate.utils import ComputeEnvironment
21
+
22
+ from .cluster import get_cluster_input
23
+ from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
24
+ from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
25
+ from .sagemaker import get_sagemaker_input
26
+
27
+
28
+ description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
29
+
30
+
31
+ def get_user_input():
32
+ compute_environment = _ask_options(
33
+ "In which compute environment are you running?",
34
+ ["This machine", "AWS (Amazon SageMaker)"],
35
+ _convert_compute_environment,
36
+ )
37
+ if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
38
+ config = get_sagemaker_input()
39
+ else:
40
+ config = get_cluster_input()
41
+ return config
42
+
43
+
44
+ def config_command_parser(subparsers=None):
45
+ if subparsers is not None:
46
+ parser = subparsers.add_parser("config", description=description)
47
+ else:
48
+ parser = argparse.ArgumentParser("Accelerate config command", description=description)
49
+
50
+ parser.add_argument(
51
+ "--config_file",
52
+ default=None,
53
+ help=(
54
+ "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
55
+ "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
56
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
57
+ "with 'huggingface'."
58
+ ),
59
+ )
60
+
61
+ if subparsers is not None:
62
+ parser.set_defaults(func=config_command)
63
+ return parser
64
+
65
+
66
+ def config_command(args):
67
+ config = get_user_input()
68
+ if args.config_file is not None:
69
+ config_file = args.config_file
70
+ else:
71
+ if not os.path.isdir(cache_dir):
72
+ os.makedirs(cache_dir)
73
+ config_file = default_yaml_config_file
74
+
75
+ if config_file.endswith(".json"):
76
+ config.to_json_file(config_file)
77
+ else:
78
+ config.to_yaml_file(config_file)
79
+ print(f"accelerate configuration saved at {config_file}")
80
+
81
+
82
+ def main():
83
+ parser = config_command_parser()
84
+ args = parser.parse_args()
85
+ config_command(args)
86
+
87
+
88
+ if __name__ == "__main__":
89
+ main()
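
`config_command_parser` serves two entry points: it is registered as the `config` subcommand of the `accelerate` CLI, and `main()` lets the module run standalone. `config_command()` then chooses the output format purely from the target file name. A brief sketch of the dispatch (the path is illustrative, not from the commit):

from accelerate.commands.config.config import config_command_parser

parser = config_command_parser()  # standalone form; under the CLI it hangs off `accelerate config`
args = parser.parse_args(["--config_file", "./my_config.json"])
# config_command(args) would now run the interactive prompts and, because the name
# ends in ".json", save the answers via config.to_json_file(); any other extension
# (including the default default_config.yaml) goes through config.to_yaml_file().
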
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_args.py ADDED
@@ -0,0 +1,243 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ import os
19
+ from dataclasses import dataclass
20
+ from enum import Enum
21
+ from typing import List, Optional, Union
22
+
23
+ import yaml
24
+
25
+ from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
26
+ from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
27
+
28
+
29
+ hf_cache_home = os.path.expanduser(
30
+ os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
31
+ )
32
+ cache_dir = os.path.join(hf_cache_home, "accelerate")
33
+ default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
34
+ default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
35
+
36
+ # For backward compatibility: the default config is the json one if it's the only existing file.
37
+ if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
38
+ default_config_file = default_yaml_config_file
39
+ else:
40
+ default_config_file = default_json_config_file
41
+
42
+
43
+ def load_config_from_file(config_file):
44
+ if config_file is not None:
45
+ if not os.path.isfile(config_file):
46
+ raise FileNotFoundError(
47
+ f"The passed configuration file `{config_file}` does not exist. "
48
+ "Please pass an existing file to `accelerate launch`, or use the default one "
49
+ "created through `accelerate config` and run `accelerate launch` "
50
+ "without the `--config_file` argument."
51
+ )
52
+ else:
53
+ config_file = default_config_file
54
+ with open(config_file, encoding="utf-8") as f:
55
+ if config_file.endswith(".json"):
56
+ if (
57
+ json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
58
+ == ComputeEnvironment.LOCAL_MACHINE
59
+ ):
60
+ config_class = ClusterConfig
61
+ else:
62
+ config_class = SageMakerConfig
63
+ return config_class.from_json_file(json_file=config_file)
64
+ else:
65
+ if (
66
+ yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
67
+ == ComputeEnvironment.LOCAL_MACHINE
68
+ ):
69
+ config_class = ClusterConfig
70
+ else:
71
+ config_class = SageMakerConfig
72
+ return config_class.from_yaml_file(yaml_file=config_file)
73
+
74
+
75
+ @dataclass
76
+ class BaseConfig:
77
+ compute_environment: ComputeEnvironment
78
+ distributed_type: Union[DistributedType, SageMakerDistributedType]
79
+ mixed_precision: str
80
+ use_cpu: bool
81
+ debug: bool
82
+
83
+ def to_dict(self):
84
+ result = self.__dict__
85
+ # For serialization, it's best to convert Enums to strings (or their underlying value type).
86
+ for key, value in result.items():
87
+ if isinstance(value, Enum):
88
+ result[key] = value.value
89
+ if isinstance(value, dict) and not bool(value):
90
+ result[key] = None
91
+ result = {k: v for k, v in result.items() if v is not None}
92
+ return result
93
+
94
+ @classmethod
95
+ def from_json_file(cls, json_file=None):
96
+ json_file = default_json_config_file if json_file is None else json_file
97
+ with open(json_file, encoding="utf-8") as f:
98
+ config_dict = json.load(f)
99
+ if "compute_environment" not in config_dict:
100
+ config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
101
+ if "mixed_precision" not in config_dict:
102
+ config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
103
+ if "fp16" in config_dict: # Convert the config to the new format.
104
+ del config_dict["fp16"]
105
+ if "dynamo_backend" in config_dict: # Convert the config to the new format.
106
+ dynamo_backend = config_dict.pop("dynamo_backend")
107
+ config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
108
+ if "use_cpu" not in config_dict:
109
+ config_dict["use_cpu"] = False
110
+ if "debug" not in config_dict:
111
+ config_dict["debug"] = False
112
+ if "enable_cpu_affinity" not in config_dict:
113
+ config_dict["enable_cpu_affinity"] = False
114
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
115
+ if len(extra_keys) > 0:
116
+ raise ValueError(
117
+ f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
118
+ " version or fix (and potentially remove) these keys from your config file."
119
+ )
120
+
121
+ return cls(**config_dict)
122
+
123
+ def to_json_file(self, json_file):
124
+ with open(json_file, "w", encoding="utf-8") as f:
125
+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
126
+ f.write(content)
127
+
128
+ @classmethod
129
+ def from_yaml_file(cls, yaml_file=None):
130
+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
131
+ with open(yaml_file, encoding="utf-8") as f:
132
+ config_dict = yaml.safe_load(f)
133
+ if "compute_environment" not in config_dict:
134
+ config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
135
+ if "mixed_precision" not in config_dict:
136
+ config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
137
+ if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]:
138
+ config_dict["mixed_precision"] = "no"
139
+ if "fp16" in config_dict: # Convert the config to the new format.
140
+ del config_dict["fp16"]
141
+ if "dynamo_backend" in config_dict: # Convert the config to the new format.
142
+ dynamo_backend = config_dict.pop("dynamo_backend")
143
+ config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
144
+ if "use_cpu" not in config_dict:
145
+ config_dict["use_cpu"] = False
146
+ if "debug" not in config_dict:
147
+ config_dict["debug"] = False
148
+ if "enable_cpu_affinity" not in config_dict:
149
+ config_dict["enable_cpu_affinity"] = False
150
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
151
+ if len(extra_keys) > 0:
152
+ raise ValueError(
153
+ f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
154
+ " version or fix (and potentially remove) these keys from your config file."
155
+ )
156
+ return cls(**config_dict)
157
+
158
+ def to_yaml_file(self, yaml_file):
159
+ with open(yaml_file, "w", encoding="utf-8") as f:
160
+ yaml.safe_dump(self.to_dict(), f)
161
+
162
+ def __post_init__(self):
163
+ if isinstance(self.compute_environment, str):
164
+ self.compute_environment = ComputeEnvironment(self.compute_environment)
165
+ if isinstance(self.distributed_type, str):
166
+ if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
167
+ self.distributed_type = SageMakerDistributedType(self.distributed_type)
168
+ else:
169
+ self.distributed_type = DistributedType(self.distributed_type)
170
+ if getattr(self, "dynamo_config", None) is None:
171
+ self.dynamo_config = {}
172
+
173
+
174
+ @dataclass
175
+ class ClusterConfig(BaseConfig):
176
+ num_processes: int
177
+ machine_rank: int = 0
178
+ num_machines: int = 1
179
+ gpu_ids: Optional[str] = None
180
+ main_process_ip: Optional[str] = None
181
+ main_process_port: Optional[int] = None
182
+ rdzv_backend: Optional[str] = "static"
183
+ same_network: Optional[bool] = False
184
+ main_training_function: str = "main"
185
+ enable_cpu_affinity: bool = False
186
+
187
+ # args for deepspeed_plugin
188
+ deepspeed_config: dict = None
189
+ # args for fsdp
190
+ fsdp_config: dict = None
191
+ # args for megatron_lm
192
+ megatron_lm_config: dict = None
193
+ # args for ipex
194
+ ipex_config: dict = None
195
+ # args for mpirun
196
+ mpirun_config: dict = None
197
+ # args for TPU
198
+ downcast_bf16: bool = False
199
+
200
+ # args for TPU pods
201
+ tpu_name: str = None
202
+ tpu_zone: str = None
203
+ tpu_use_cluster: bool = False
204
+ tpu_use_sudo: bool = False
205
+ command_file: str = None
206
+ commands: List[str] = None
207
+ tpu_vm: List[str] = None
208
+ tpu_env: List[str] = None
209
+
210
+ # args for dynamo
211
+ dynamo_config: dict = None
212
+
213
+ def __post_init__(self):
214
+ if self.deepspeed_config is None:
215
+ self.deepspeed_config = {}
216
+ if self.fsdp_config is None:
217
+ self.fsdp_config = {}
218
+ if self.megatron_lm_config is None:
219
+ self.megatron_lm_config = {}
220
+ if self.ipex_config is None:
221
+ self.ipex_config = {}
222
+ if self.mpirun_config is None:
223
+ self.mpirun_config = {}
224
+ return super().__post_init__()
225
+
226
+
227
+ @dataclass
228
+ class SageMakerConfig(BaseConfig):
229
+ ec2_instance_type: str
230
+ iam_role_name: str
231
+ image_uri: Optional[str] = None
232
+ profile: Optional[str] = None
233
+ region: str = "us-east-1"
234
+ num_machines: int = 1
235
+ gpu_ids: str = "all"
236
+ base_job_name: str = f"accelerate-sagemaker-{num_machines}"
237
+ pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
238
+ transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
239
+ py_version: str = SAGEMAKER_PYTHON_VERSION
240
+ sagemaker_inputs_file: str = None
241
+ sagemaker_metrics_file: str = None
242
+ additional_args: dict = None
243
+ dynamo_config: dict = None
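
`load_config_from_file` falls back to the default cache-location config when no path is given, and picks `ClusterConfig` or `SageMakerConfig` from the stored `compute_environment`; both classes round-trip through JSON and YAML. A small usage sketch (file names are illustrative, not from the commit):

from accelerate.commands.config.config_args import load_config_from_file

config = load_config_from_file("./my_config.yaml")  # raises FileNotFoundError if the file is missing
print(type(config).__name__)                        # ClusterConfig or SageMakerConfig
print(config.to_dict())                             # enums flattened to plain values, empty dicts dropped

config.to_yaml_file("./copy_of_config.yaml")        # write-back uses the same dict form
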
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py ADDED
@@ -0,0 +1,101 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from ...utils.dataclasses import (
20
+ ComputeEnvironment,
21
+ DistributedType,
22
+ DynamoBackend,
23
+ PrecisionType,
24
+ SageMakerDistributedType,
25
+ )
26
+ from ..menu import BulletMenu
27
+
28
+
29
+ DYNAMO_BACKENDS = [
30
+ "EAGER",
31
+ "AOT_EAGER",
32
+ "INDUCTOR",
33
+ "AOT_TS_NVFUSER",
34
+ "NVPRIMS_NVFUSER",
35
+ "CUDAGRAPHS",
36
+ "OFI",
37
+ "FX2TRT",
38
+ "ONNXRT",
39
+ "TENSORRT",
40
+ "IPEX",
41
+ "TVM",
42
+ ]
43
+
44
+
45
+ def _ask_field(input_text, convert_value=None, default=None, error_message=None):
46
+ ask_again = True
47
+ while ask_again:
48
+ result = input(input_text)
49
+ try:
50
+ if default is not None and len(result) == 0:
51
+ return default
52
+ return convert_value(result) if convert_value is not None else result
53
+ except Exception:
54
+ if error_message is not None:
55
+ print(error_message)
56
+
57
+
58
+ def _ask_options(input_text, options=[], convert_value=None, default=0):
59
+ menu = BulletMenu(input_text, options)
60
+ result = menu.run(default_choice=default)
61
+ return convert_value(result) if convert_value is not None else result
62
+
63
+
64
+ def _convert_compute_environment(value):
65
+ value = int(value)
66
+ return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
67
+
68
+
69
+ def _convert_distributed_mode(value):
70
+ value = int(value)
71
+ return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "XLA"][value])
72
+
73
+
74
+ def _convert_dynamo_backend(value):
75
+ value = int(value)
76
+ return DynamoBackend(DYNAMO_BACKENDS[value]).value
77
+
78
+
79
+ def _convert_mixed_precision(value):
80
+ value = int(value)
81
+ return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
82
+
83
+
84
+ def _convert_sagemaker_distributed_mode(value):
85
+ value = int(value)
86
+ return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
87
+
88
+
89
+ def _convert_yes_no_to_bool(value):
90
+ return {"yes": True, "no": False}[value.lower()]
91
+
92
+
93
+ class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
94
+ """
95
+ A custom formatter that will remove the usage line from the help message for subcommands.
96
+ """
97
+
98
+ def _format_usage(self, usage, actions, groups, prefix):
99
+ usage = super()._format_usage(usage, actions, groups, prefix)
100
+ usage = usage.replace("<command> [<args>] ", "")
101
+ return usage
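
The `_convert_*` helpers map a menu index (or a yes/no answer) onto the project's enums, and `_ask_field` simply re-prompts until the converter stops raising. A quick non-interactive illustration of the converters, assuming `accelerate` is importable (the calls below are mine, not part of the commit):

from accelerate.commands.config.config_utils import (
    _convert_distributed_mode,
    _convert_mixed_precision,
    _convert_yes_no_to_bool,
)

print(_convert_yes_no_to_bool("YES"))  # True; anything other than yes/no raises, so _ask_field asks again
print(_convert_distributed_mode("3"))  # maps index 3 of the menu to DistributedType.MULTI_GPU
print(_convert_mixed_precision("2"))   # maps index 2 to PrecisionType.BF16
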
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/default.py ADDED
@@ -0,0 +1,133 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ import torch
20
+
21
+ from ...utils import is_mlu_available, is_npu_available, is_xpu_available
22
+ from .config_args import ClusterConfig, default_json_config_file
23
+ from .config_utils import SubcommandHelpFormatter
24
+
25
+
26
+ description = "Create a default config file for Accelerate with only a few flags set."
27
+
28
+
29
+ def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
30
+ """
31
+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
32
+ set CPU if it is a CPU-only machine.
33
+
34
+ Args:
35
+ mixed_precision (`str`, *optional*, defaults to "no"):
36
+ Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
37
+ save_location (`str`, *optional*, defaults to `default_json_config_file`):
38
+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
39
+ location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overridden by setting
40
+ the `HF_HOME` environment variable, followed by `accelerate/default_config.yaml`.
41
+ use_xpu (`bool`, *optional*, defaults to `False`):
42
+ Whether to use XPU if available.
43
+ """
44
+ path = Path(save_location)
45
+ path.parent.mkdir(parents=True, exist_ok=True)
46
+ if path.exists():
47
+ print(
48
+ f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
49
+ )
50
+ return False
51
+ mixed_precision = mixed_precision.lower()
52
+ if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
53
+ raise ValueError(
54
+ f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
55
+ )
56
+ config = {
57
+ "compute_environment": "LOCAL_MACHINE",
58
+ "mixed_precision": mixed_precision,
59
+ }
60
+ if is_mlu_available():
61
+ num_mlus = torch.mlu.device_count()
62
+ config["num_processes"] = num_mlus
63
+ config["use_cpu"] = False
64
+ if num_mlus > 1:
65
+ config["distributed_type"] = "MULTI_MLU"
66
+ else:
67
+ config["distributed_type"] = "NO"
68
+ elif torch.cuda.is_available():
69
+ num_gpus = torch.cuda.device_count()
70
+ config["num_processes"] = num_gpus
71
+ config["use_cpu"] = False
72
+ if num_gpus > 1:
73
+ config["distributed_type"] = "MULTI_GPU"
74
+ else:
75
+ config["distributed_type"] = "NO"
76
+ elif is_xpu_available() and use_xpu:
77
+ num_xpus = torch.xpu.device_count()
78
+ config["num_processes"] = num_xpus
79
+ config["use_cpu"] = False
80
+ if num_xpus > 1:
81
+ config["distributed_type"] = "MULTI_XPU"
82
+ else:
83
+ config["distributed_type"] = "NO"
84
+ elif is_npu_available():
85
+ num_npus = torch.npu.device_count()
86
+ config["num_processes"] = num_npus
87
+ config["use_cpu"] = False
88
+ if num_npus > 1:
89
+ config["distributed_type"] = "MULTI_NPU"
90
+ else:
91
+ config["distributed_type"] = "NO"
92
+ else:
93
+ num_xpus = 0
94
+ config["use_cpu"] = True
95
+ config["num_processes"] = 1
96
+ config["distributed_type"] = "NO"
97
+ config["debug"] = False
98
+ config = ClusterConfig(**config)
99
+ config.to_json_file(path)
100
+ return path
101
+
102
+
103
+ def default_command_parser(parser, parents):
104
+ parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
105
+ parser.add_argument(
106
+ "--config_file",
107
+ default=default_json_config_file,
108
+ help=(
109
+ "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
110
+ "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
111
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
112
+ "with 'huggingface'."
113
+ ),
114
+ dest="save_location",
115
+ )
116
+
117
+ parser.add_argument(
118
+ "--mixed_precision",
119
+ choices=["no", "fp16", "bf16"],
120
+ type=str,
121
+ help="Whether or not to use mixed precision training. "
122
+ "Choose between FP16 and BF16 (bfloat16) training. "
123
+ "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
124
+ default="no",
125
+ )
126
+ parser.set_defaults(func=default_config_command)
127
+ return parser
128
+
129
+
130
+ def default_config_command(args):
131
+ config_file = write_basic_config(args.mixed_precision, args.save_location)
132
+ if config_file:
133
+ print(f"accelerate configuration saved at {config_file}")
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/update.py ADDED
@@ -0,0 +1,63 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ from .config_args import default_config_file, load_config_from_file
20
+ from .config_utils import SubcommandHelpFormatter
21
+
22
+
23
+ description = "Update an existing config file with the latest defaults while maintaining the old configuration."
24
+
25
+
26
+ def update_config(args):
27
+ """
28
+ Update an existing config file with the latest defaults while maintaining the old configuration.
29
+ """
30
+ config_file = args.config_file
31
+ if config_file is None and Path(default_config_file).exists():
32
+ config_file = default_config_file
33
+ elif not Path(config_file).exists():
34
+ raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
35
+ config = load_config_from_file(config_file)
36
+
37
+ if config_file.endswith(".json"):
38
+ config.to_json_file(config_file)
39
+ else:
40
+ config.to_yaml_file(config_file)
41
+ return config_file
42
+
43
+
44
+ def update_command_parser(parser, parents):
45
+ parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
46
+ parser.add_argument(
47
+ "--config_file",
48
+ default=None,
49
+ help=(
50
+ "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
51
+ "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
52
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
53
+ "with 'huggingface'."
54
+ ),
55
+ )
56
+
57
+ parser.set_defaults(func=update_config_command)
58
+ return parser
59
+
60
+
61
+ def update_config_command(args):
62
+ config_file = update_config(args)
63
+ print(f"Sucessfully updated the configuration file at {config_file}.")
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__init__.py ADDED
@@ -0,0 +1,51 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from .testing import (
15
+ DEFAULT_LAUNCH_COMMAND,
16
+ are_the_same_tensors,
17
+ assert_exception,
18
+ device_count,
19
+ execute_subprocess_async,
20
+ get_launch_command,
21
+ memory_allocated_func,
22
+ path_in_accelerate_package,
23
+ require_bnb,
24
+ require_cpu,
25
+ require_cuda,
26
+ require_huggingface_suite,
27
+ require_mlu,
28
+ require_mps,
29
+ require_multi_device,
30
+ require_multi_gpu,
31
+ require_multi_xpu,
32
+ require_non_cpu,
33
+ require_non_torch_xla,
34
+ require_non_xpu,
35
+ require_npu,
36
+ require_pippy,
37
+ require_single_device,
38
+ require_single_gpu,
39
+ require_single_xpu,
40
+ require_torch_min_version,
41
+ require_torchvision,
42
+ require_tpu,
43
+ require_xpu,
44
+ skip,
45
+ slow,
46
+ torch_device,
47
+ )
48
+ from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
49
+
50
+
51
+ from .scripts import test_script, test_sync, test_ops # isort: skip
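As a rough illustration of how these re-exports are meant to be consumed in a downstream test suite (the test bodies are made-up placeholders):

import unittest

from accelerate.test_utils import require_cuda, require_multi_gpu, slow


class ExampleSuite(unittest.TestCase):
    @require_cuda
    def test_needs_a_gpu(self):
        # Skipped automatically on machines without CUDA.
        self.assertTrue(True)

    @slow
    @require_multi_gpu
    def test_needs_two_gpus_and_patience(self):
        # Skipped unless slow tests are enabled (RUN_SLOW=1) and at least two GPUs are visible.
        self.assertTrue(True)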
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.27 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc ADDED
Binary file (5.26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc ADDED
Binary file (20.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc ADDED
Binary file (4.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/examples.py ADDED
@@ -0,0 +1,146 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each
18
+ `examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the
19
+ others are used to either get the code that matters, or to preprocess them (such as stripping comments)
20
+ """
21
+
22
+ import os
23
+ from typing import List
24
+
25
+
26
+ def get_function_contents_by_name(lines: List[str], name: str):
27
+ """
28
+ Extracts a function from `lines` of segmented source code with the name `name`.
29
+
30
+ Args:
31
+ lines (`List[str]`):
32
+ Source code of a script separated by line.
33
+ name (`str`):
34
+ The name of the function to extract. Should be either `training_function` or `main`
35
+ """
36
+ if name != "training_function" and name != "main":
37
+ raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
38
+ good_lines, found_start = [], False
39
+ for line in lines:
40
+ if not found_start and f"def {name}" in line:
41
+ found_start = True
42
+ good_lines.append(line)
43
+ continue
44
+ if found_start:
45
+ if name == "training_function" and "def main" in line:
46
+ return good_lines
47
+ if name == "main" and "if __name__" in line:
48
+ return good_lines
49
+ good_lines.append(line)
50
+
51
+
52
+ def clean_lines(lines: List[str]):
53
+ """
54
+ Filters `lines` and removes any entries that start with a comment ('#') or are just a newline ('\n')
55
+
56
+ Args:
57
+ lines (`List[str]`):
58
+ Source code of a script separated by line.
59
+ """
60
+ return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"]
61
+
62
+
63
+ def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):
64
+ """
65
+ Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be
66
+ used when testing to see if `complete_*_.py` examples have all of the implementations from each of the
67
+ `examples/by_feature/*` scripts.
68
+
69
+ It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code
70
+ is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the
71
+ `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter.
72
+
73
+ Args:
74
+ base_filename (`str` or `os.PathLike`):
75
+ The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py`
76
+ feature_filename (`str` or `os.PathLike`):
77
+ The filepath of a single feature example script. The contents of this script are checked to see if they
78
+ exist in `base_filename`
79
+ parser_only (`bool`):
80
+ Whether to compare only the `main()` sections in both files, or to compare the contents of
81
+ `training_loop()`
82
+ secondary_filename (`str`, *optional*):
83
+ A potential secondary filepath that should be included in the check. This function extracts the base
84
+ functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than
85
+ `complete_nlp_example.py`, the template script should be included here. Such as `examples/cv_example.py`
86
+ """
87
+ with open(base_filename) as f:
88
+ base_file_contents = f.readlines()
89
+ with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f:
90
+ full_file_contents = f.readlines()
91
+ with open(feature_filename) as f:
92
+ feature_file_contents = f.readlines()
93
+ if secondary_filename is not None:
94
+ with open(secondary_filename) as f:
95
+ secondary_file_contents = f.readlines()
96
+
97
+ # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content
98
+ if parser_only:
99
+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main"))
100
+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main"))
101
+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main"))
102
+ if secondary_filename is not None:
103
+ secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main"))
104
+ else:
105
+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function"))
106
+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function"))
107
+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function"))
108
+ if secondary_filename is not None:
109
+ secondary_file_func = clean_lines(
110
+ get_function_contents_by_name(secondary_file_contents, "training_function")
111
+ )
112
+
113
+ _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n"
114
+
115
+ # Specific code in our script that differs from the full version, aka what is new
116
+ new_feature_code = []
117
+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement
118
+ it = iter(feature_file_func)
119
+ for i in range(len(feature_file_func) - 1):
120
+ if i not in passed_idxs:
121
+ line = next(it)
122
+ if (line not in full_file_func) and (line.lstrip() != _dl_line):
123
+ if "TESTING_MOCKED_DATALOADERS" not in line:
124
+ new_feature_code.append(line)
125
+ passed_idxs.append(i)
126
+ else:
127
+ # Skip over the `config['num_epochs'] = 2` statement
128
+ _ = next(it)
129
+
130
+ # Extract out just the new parts from the full_file_training_func
131
+ new_full_example_parts = []
132
+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement
133
+ for i, line in enumerate(base_file_func):
134
+ if i not in passed_idxs:
135
+ if (line not in full_file_func) and (line.lstrip() != _dl_line):
136
+ if "TESTING_MOCKED_DATALOADERS" not in line:
137
+ new_full_example_parts.append(line)
138
+ passed_idxs.append(i)
139
+
140
+ # Finally, get the overall diff
141
+ diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts]
142
+ if secondary_filename is not None:
143
+ diff_from_two = [line for line in full_file_contents if line not in secondary_file_func]
144
+ diff_from_example = [line for line in diff_from_example if line not in diff_from_two]
145
+
146
+ return diff_from_example
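A hypothetical invocation of `compare_against_test`, run from the repository root so that `examples/nlp_example.py` resolves; the two filenames are examples only:

from accelerate.test_utils.examples import compare_against_test

# An empty diff means every line introduced by the feature script also
# appears in the complete example.
diff = compare_against_test(
    base_filename="examples/complete_nlp_example.py",
    feature_filename="examples/by_feature/checkpointing.py",
    parser_only=False,
)
assert diff == [], f"Missing lines: {diff}"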
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (202 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc ADDED
Binary file (494 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc ADDED
Binary file (8.27 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc ADDED
Binary file (1.51 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc ADDED
Binary file (4.61 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc ADDED
Binary file (20.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc ADDED
Binary file (9.05 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (216 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc ADDED
Binary file (6.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc ADDED
Binary file (9.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc ADDED
Binary file (7.18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc ADDED
Binary file (5.91 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc ADDED
Binary file (2.99 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py ADDED
@@ -0,0 +1,268 @@
1
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import json
16
+ import os
17
+
18
+ import evaluate
19
+ import torch
20
+ from datasets import load_dataset
21
+ from torch.optim import AdamW
22
+ from torch.utils.data import DataLoader
23
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
24
+
25
+ from accelerate import Accelerator, DistributedType
26
+ from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
27
+
28
+
29
+ MAX_GPU_BATCH_SIZE = 16
30
+ EVAL_BATCH_SIZE = 32
31
+
32
+
33
+ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
34
+ """
35
+ Creates a set of `DataLoader`s for the `glue` dataset.
36
+
37
+ Args:
38
+ accelerator (`Accelerator`):
39
+ An `Accelerator` object
40
+ batch_size (`int`, *optional*):
41
+ The batch size for the train and validation DataLoaders.
42
+ model_name (`str`, *optional*):
43
+ """
44
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
45
+ datasets = load_dataset("glue", "mrpc")
46
+
47
+ def tokenize_function(examples):
48
+ # max_length=None => use the model max length (it's actually the default)
49
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
50
+ return outputs
51
+
52
+ # Apply the method we just defined to all the examples in all the splits of the dataset
53
+ tokenized_datasets = datasets.map(
54
+ tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
55
+ )
56
+
57
+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
58
+ # transformers library
59
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
60
+
61
+ def collate_fn(examples):
62
+ # On TPU it's best to pad everything to the same length or training will be very slow.
63
+ if accelerator.distributed_type == DistributedType.XLA:
64
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
65
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
66
+
67
+ # Instantiate dataloaders.
68
+ train_dataloader = DataLoader(
69
+ tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
70
+ )
71
+ eval_dataloader = DataLoader(
72
+ tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
73
+ )
74
+
75
+ return train_dataloader, eval_dataloader
76
+
77
+
78
+ def evaluation_loop(accelerator, model, eval_dataloader, metric):
79
+ model.eval()
80
+ samples_seen = 0
81
+ for step, batch in enumerate(eval_dataloader):
82
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
83
+ batch.to(accelerator.device)
84
+ with torch.no_grad():
85
+ outputs = model(**batch)
86
+ predictions = outputs.logits.argmax(dim=-1)
87
+ # It is slightly faster to call this once, than multiple times
88
+ predictions, references = accelerator.gather(
89
+ (predictions, batch["labels"])
90
+ ) # If we are in a multiprocess environment, the last batch has duplicates
91
+ if accelerator.use_distributed:
92
+ if step == len(eval_dataloader) - 1:
93
+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
94
+ references = references[: len(eval_dataloader.dataset) - samples_seen]
95
+ else:
96
+ samples_seen += references.shape[0]
97
+ metric.add_batch(
98
+ predictions=predictions,
99
+ references=references,
100
+ )
101
+
102
+ eval_metric = metric.compute()
103
+ return eval_metric["accuracy"]
104
+
105
+
106
+ def training_function(config, args):
107
+ # Initialize accelerator
108
+ accelerator = Accelerator()
109
+
110
+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
111
+ lr = config["lr"]
112
+ num_epochs = int(config["num_epochs"])
113
+ seed = int(config["seed"])
114
+ batch_size = int(config["batch_size"])
115
+ model_name = args.model_name_or_path
116
+
117
+ set_seed(seed)
118
+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
119
+
120
+ # Instantiate the model (we build the model here so that the seed also controls the new weights initialization)
121
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
122
+
123
+ # Instantiate optimizer
124
+ optimizer_cls = (
125
+ AdamW
126
+ if accelerator.state.deepspeed_plugin is None
127
+ or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
128
+ else DummyOptim
129
+ )
130
+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)
131
+
132
+ if accelerator.state.deepspeed_plugin is not None:
133
+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
134
+ "gradient_accumulation_steps"
135
+ ]
136
+ else:
137
+ gradient_accumulation_steps = 1
138
+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
139
+
140
+ # Instantiate scheduler
141
+ if (
142
+ accelerator.state.deepspeed_plugin is None
143
+ or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
144
+ ):
145
+ lr_scheduler = get_linear_schedule_with_warmup(
146
+ optimizer=optimizer,
147
+ num_warmup_steps=0,
148
+ num_training_steps=max_training_steps,
149
+ )
150
+ else:
151
+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
152
+
153
+ # Prepare everything
154
+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
155
+ # prepare method.
156
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
157
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
158
+ )
159
+
160
+ # We need to keep track of how many total steps we have iterated over
161
+ overall_step = 0
162
+ # We also need to keep track of the starting epoch so files are named properly
163
+ starting_epoch = 0
164
+ metric = evaluate.load("glue", "mrpc")
165
+ ending_epoch = num_epochs
166
+
167
+ if args.partial_train_epoch is not None:
168
+ ending_epoch = args.partial_train_epoch
169
+
170
+ if args.resume_from_checkpoint:
171
+ accelerator.load_state(args.resume_from_checkpoint)
172
+ epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
173
+ state_epoch_num = ""
174
+ for char in epoch_string:
175
+ if char.isdigit():
176
+ state_epoch_num += char
177
+ else:
178
+ break
179
+ starting_epoch = int(state_epoch_num) + 1
180
+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
181
+ accelerator.print("resumed checkpoint performance:", accuracy)
182
+ accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
183
+ accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
184
+ with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f:
185
+ resumed_state = json.load(f)
186
+ assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
187
+ assert (
188
+ resumed_state["lr"] == lr_scheduler.get_lr()[0]
189
+ ), "Scheduler learning rate mismatch, loading from checkpoint failed"
190
+ assert (
191
+ resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
192
+ ), "Optimizer learning rate mismatch, loading from checkpoint failed"
193
+ assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
194
+ return
195
+
196
+ # Now we train the model
197
+ state = {}
198
+ for epoch in range(starting_epoch, ending_epoch):
199
+ model.train()
200
+ for step, batch in enumerate(train_dataloader):
201
+ outputs = model(**batch)
202
+ loss = outputs.loss
203
+ loss = loss / gradient_accumulation_steps
204
+ accelerator.backward(loss)
205
+ if step % gradient_accumulation_steps == 0:
206
+ optimizer.step()
207
+ lr_scheduler.step()
208
+ optimizer.zero_grad()
209
+
210
+ overall_step += 1
211
+ output_dir = f"epoch_{epoch}"
212
+ output_dir = os.path.join(args.output_dir, output_dir)
213
+ accelerator.save_state(output_dir)
214
+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
215
+ state["accuracy"] = accuracy
216
+ state["lr"] = lr_scheduler.get_lr()[0]
217
+ state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
218
+ state["epoch"] = epoch
219
+ state["step"] = overall_step
220
+ accelerator.print(f"epoch {epoch}:", state)
221
+
222
+ accelerator.wait_for_everyone()
223
+ if accelerator.is_main_process:
224
+ with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
225
+ json.dump(state, f)
226
+
227
+
228
+ def main():
229
+ parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
230
+ parser.add_argument(
231
+ "--model_name_or_path",
232
+ type=str,
233
+ default="bert-base-cased",
234
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
235
+ required=False,
236
+ )
237
+ parser.add_argument(
238
+ "--output_dir",
239
+ type=str,
240
+ default=".",
241
+ help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
242
+ )
243
+ parser.add_argument(
244
+ "--resume_from_checkpoint",
245
+ type=str,
246
+ default=None,
247
+ help="If the training should continue from a checkpoint folder.",
248
+ )
249
+ parser.add_argument(
250
+ "--partial_train_epoch",
251
+ type=int,
252
+ default=None,
253
+ help="If passed, the training will stop after this number of epochs.",
254
+ )
255
+ parser.add_argument(
256
+ "--num_epochs",
257
+ type=int,
258
+ default=2,
259
+ help="Number of train epochs.",
260
+ )
261
+ args = parser.parse_args()
262
+ config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
263
+
264
+ training_function(config, args)
265
+
266
+
267
+ if __name__ == "__main__":
268
+ main()
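The resume logic above recovers the epoch index from the checkpoint folder name written by `accelerator.save_state`; a condensed sketch of that parsing, using a made-up folder name:

import re

# Folders are written as <output_dir>/epoch_0, <output_dir>/epoch_1, ...
checkpoint = "outputs/epoch_3"

# Equivalent to the character-by-character loop in `training_function`.
match = re.search(r"epoch_(\d+)", checkpoint)
starting_epoch = int(match.group(1)) + 1 if match else 0
print(starting_epoch)  # 4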
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py ADDED
@@ -0,0 +1,306 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import math
17
+ import os
18
+ from copy import deepcopy
19
+
20
+ import datasets
21
+ import evaluate
22
+ import torch
23
+ import transformers
24
+ from datasets import load_dataset
25
+ from torch.utils.data import DataLoader, IterableDataset
26
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
27
+
28
+ from accelerate import Accelerator, DataLoaderConfiguration, DistributedType
29
+ from accelerate.data_loader import DataLoaderDispatcher
30
+ from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device
31
+ from accelerate.utils import is_torch_xla_available, set_seed
32
+
33
+
34
+ os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
35
+
36
+
37
+ class ListHandler(logging.Handler):
38
+ def __init__(self, *args, **kwargs):
39
+ super().__init__(*args, **kwargs)
40
+ self.logs = []
41
+
42
+ def emit(self, record):
43
+ self.logs.append(record)
44
+
45
+
46
+ def get_basic_setup(accelerator, num_samples=82, batch_size=16):
47
+ "Returns everything needed to perform basic training"
48
+ set_seed(42)
49
+ model = RegressionModel()
50
+ ddp_model = deepcopy(model)
51
+ dset = RegressionDataset(length=num_samples)
52
+ dataloader = DataLoader(dset, batch_size=batch_size)
53
+ model.to(accelerator.device)
54
+ ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
55
+ return model, ddp_model, dataloader
56
+
57
+
58
+ def get_dataloader(accelerator: Accelerator, use_longest=False):
59
+ tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
60
+ dataset = load_dataset("glue", "mrpc", split="validation")
61
+
62
+ def tokenize_function(examples):
63
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
64
+ return outputs
65
+
66
+ with accelerator.main_process_first():
67
+ tokenized_datasets = dataset.map(
68
+ tokenize_function,
69
+ batched=True,
70
+ remove_columns=["idx", "sentence1", "sentence2"],
71
+ )
72
+
73
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
74
+
75
+ def collate_fn(examples):
76
+ if use_longest:
77
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
78
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
79
+
80
+ return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
81
+
82
+
83
+ def get_mrpc_setup(dispatch_batches, split_batches):
84
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, split_batches=split_batches)
85
+ accelerator = Accelerator(dataloader_config=dataloader_config)
86
+ dataloader = get_dataloader(accelerator, not dispatch_batches)
87
+ model = AutoModelForSequenceClassification.from_pretrained(
88
+ "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
89
+ )
90
+ ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
91
+ return {
92
+ "ddp": [ddp_model, ddp_dataloader, torch_device],
93
+ "no": [model, dataloader, accelerator.device],
94
+ }, accelerator
95
+
96
+
97
+ def generate_predictions(model, dataloader, accelerator):
98
+ logits_and_targets = []
99
+ for batch in dataloader:
100
+ input, target = batch.values()
101
+ with torch.no_grad():
102
+ logit = model(input)
103
+ logit, target = accelerator.gather_for_metrics((logit, target))
104
+ logits_and_targets.append((logit, target))
105
+ logits, targs = [], []
106
+ for logit, targ in logits_and_targets:
107
+ logits.append(logit)
108
+ targs.append(targ)
109
+ logits, targs = torch.cat(logits), torch.cat(targs)
110
+ return logits, targs
111
+
112
+
113
+ def test_torch_metrics(
114
+ accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
115
+ ):
116
+ _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
117
+ logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
118
+ assert (
119
+ len(logits) == num_samples
120
+ ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
121
+
122
+
123
+ def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
124
+ metric = evaluate.load("glue", "mrpc")
125
+ setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
126
+ # First do baseline
127
+ model, dataloader, device = setup["no"]
128
+ model.to(device)
129
+ model.eval()
130
+ for batch in dataloader:
131
+ batch.to(device)
132
+ with torch.inference_mode():
133
+ outputs = model(**batch)
134
+ preds = outputs.logits.argmax(dim=-1)
135
+ metric.add_batch(predictions=preds, references=batch["labels"])
136
+ baseline = metric.compute()
137
+
138
+ # Then do distributed
139
+ model, dataloader, device = setup["ddp"]
140
+ model.eval()
141
+ for batch in dataloader:
142
+ with torch.inference_mode():
143
+ outputs = model(**batch)
144
+ preds = outputs.logits.argmax(dim=-1)
145
+ references = batch["labels"]
146
+ preds, references = accelerator.gather_for_metrics((preds, references))
147
+ metric.add_batch(predictions=preds, references=references)
148
+ distributed = metric.compute()
149
+
150
+ for key in "accuracy f1".split():
151
+ assert math.isclose(
152
+ baseline[key], distributed[key]
153
+ ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
154
+
155
+
156
+ def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
157
+ class DummyIterableDataset(IterableDataset):
158
+ def __init__(self, data):
159
+ self.data = data
160
+
161
+ def __len__(self):
162
+ return len(self.data)
163
+
164
+ def __iter__(self):
165
+ yield from self.data
166
+
167
+ iterable_dataset = DummyIterableDataset([n for n in range(30)])
168
+ dataloader = DataLoader(iterable_dataset, batch_size=4)
169
+ accelerator = Accelerator()
170
+ prepared_dataloader = accelerator.prepare(dataloader)
171
+
172
+ if accelerator.is_main_process:
173
+ logger = logging.root.manager.loggerDict["accelerate.accelerator"]
174
+ list_handler = ListHandler()
175
+ logger.addHandler(list_handler)
176
+
177
+ batches_for_metrics = []
178
+ for batch in prepared_dataloader:
179
+ batches_for_metrics.append(accelerator.gather_for_metrics(batch))
180
+
181
+ assert torch.cat(batches_for_metrics).size(0) == 30
182
+
183
+ if accelerator.is_main_process:
184
+ assert len(list_handler.logs) == 0
185
+ logger.removeHandler(list_handler)
186
+
187
+
188
+ def test_gather_for_metrics_with_iterable_dataset():
189
+ class DummyIterableDataset(IterableDataset):
190
+ def __init__(self, data):
191
+ self.data = data
192
+
193
+ def __len__(self):
194
+ return len(self.data)
195
+
196
+ def __iter__(self):
197
+ yield from self.data
198
+
199
+ iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30)))
200
+ dataloader = DataLoader(iterable_dataset, batch_size=4)
201
+
202
+ accelerator = Accelerator()
203
+ prepared_dataloader = accelerator.prepare(dataloader)
204
+
205
+ assert isinstance(prepared_dataloader, DataLoaderDispatcher)
206
+
207
+ if accelerator.is_main_process:
208
+ logger = logging.root.manager.loggerDict["accelerate.accelerator"]
209
+ list_handler = ListHandler()
210
+ logger.addHandler(list_handler)
211
+
212
+ batches_for_metrics = []
213
+ for batch in prepared_dataloader:
214
+ batches_for_metrics.append(accelerator.gather_for_metrics(batch))
215
+
216
+ assert torch.cat(batches_for_metrics).size(0) == 30
217
+
218
+ if accelerator.is_main_process:
219
+ assert len(list_handler.logs) == 0
220
+
221
+ logger.removeHandler(list_handler)
222
+
223
+
224
+ def test_gather_for_metrics_drop_last():
225
+ accelerator = Accelerator()
226
+ per_device_batch_size = 5
227
+ num_items = (10 * accelerator.num_processes) + 1
228
+ dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True)
229
+ dataloader = accelerator.prepare(dataloader)
230
+
231
+ iterator = iter(dataloader)
232
+ next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0')
233
+ batch = next(iterator)
234
+ gathered_items = accelerator.gather_for_metrics(batch)
235
+
236
+ # Should return a full set of complete batches from each GPU
237
+ num_expected_items = per_device_batch_size * accelerator.num_processes
238
+ assert gathered_items.size(0) == (
239
+ num_expected_items
240
+ ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
241
+
242
+
243
+ def main():
244
+ dataloader_config = DataLoaderConfiguration(split_batches=False, dispatch_batches=False)
245
+ accelerator = Accelerator(dataloader_config=dataloader_config)
246
+ if accelerator.is_local_main_process:
247
+ datasets.utils.logging.set_verbosity_warning()
248
+ transformers.utils.logging.set_verbosity_warning()
249
+ else:
250
+ datasets.utils.logging.set_verbosity_error()
251
+ transformers.utils.logging.set_verbosity_error()
252
+ # TorchXLA does not support batch dispatching. 'put_on_device' is always False for
253
+ # TorchXLA, which can cause a value error in 'prepare_data_loader' function.
254
+ dispatch_batches_options = [False] if accelerator.state.distributed_type == DistributedType.XLA else [True, False]
255
+
256
+ # Temporarily close this test for TorchXLA due to the 'Cannot set version_counter for
257
+ # inference tensor' error in inference mode. Reopen it after TorchXLA fixes this bug.
258
+ # These are a bit slower so they should only be ran on the GPU or TPU
259
+ if accelerator.device.type != "cpu" and not is_torch_xla_available():
260
+ if accelerator.is_local_main_process:
261
+ print("**Testing gather_for_metrics**")
262
+ for split_batches in [True, False]:
263
+ for dispatch_batches in dispatch_batches_options:
264
+ if accelerator.is_local_main_process:
265
+ print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
266
+ test_mrpc(dispatch_batches, split_batches)
267
+ accelerator.state._reset_state()
268
+ print("test_gather_for_metrics_with_iterable_dataset")
269
+ test_gather_for_metrics_with_iterable_dataset()
270
+ print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
271
+ test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
272
+
273
+ # MpDeviceLoader in TorchXLA is an asynchronous loader that preloads several batches into cache.
274
+ # This can cause the 'end_of_dataloader' of DataLoaderStateMixin to be set earlier than intended.
275
+ # Skip this test when TorchXLA is enabled.
276
+ if accelerator.state.distributed_type != DistributedType.XLA:
277
+ if accelerator.is_local_main_process:
278
+ print("**Test torch metrics**")
279
+ for split_batches in [True, False]:
280
+ for dispatch_batches in dispatch_batches_options:
281
+ dataloader_config = DataLoaderConfiguration(
282
+ split_batches=split_batches, dispatch_batches=dispatch_batches
283
+ )
284
+ accelerator = Accelerator(dataloader_config=dataloader_config)
285
+ if accelerator.is_local_main_process:
286
+ print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
287
+ test_torch_metrics(accelerator, 99)
288
+ accelerator.state._reset_state()
289
+ if accelerator.is_local_main_process:
290
+ print("**Test last batch is not dropped when perfectly divisible**")
291
+ accelerator = Accelerator()
292
+ test_torch_metrics(accelerator, 512)
293
+ accelerator.state._reset_state()
294
+ if accelerator.is_local_main_process:
295
+ print("**Test that `drop_last` is taken into account**")
296
+ test_gather_for_metrics_drop_last()
297
+ accelerator.state._reset_state()
298
+
299
+
300
+ def _mp_fn(index):
301
+ # For xla_spawn (TPUs)
302
+ main()
303
+
304
+
305
+ if __name__ == "__main__":
306
+ main()
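The behaviour these tests pin down: `gather_for_metrics` gathers a batch from every process and drops the samples that were duplicated to even out the last batch, so metrics see each example exactly once. A minimal standalone sketch (the sizes are illustrative; it also runs on a single CPU process):

import torch
from torch.utils.data import DataLoader

from accelerate import Accelerator

accelerator = Accelerator()
dataloader = accelerator.prepare(DataLoader(torch.arange(10), batch_size=4))

gathered = [accelerator.gather_for_metrics(batch) for batch in dataloader]
# Regardless of the number of processes, the concatenation has exactly 10 elements.
assert torch.cat(gathered).numel() == 10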
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py ADDED
@@ -0,0 +1,282 @@
1
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import gc
16
+ import json
17
+ import os
18
+
19
+ import torch
20
+ from datasets import load_dataset
21
+ from torch.optim import AdamW
22
+ from torch.utils.data import DataLoader
23
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
24
+
25
+ from accelerate import Accelerator, DistributedType
26
+ from accelerate.utils import is_mlu_available, is_npu_available, is_xpu_available
27
+ from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
28
+
29
+
30
+ MAX_GPU_BATCH_SIZE = 16
31
+ EVAL_BATCH_SIZE = 32
32
+
33
+
34
+ # Converting Bytes to Megabytes
35
+ def b2mb(x):
36
+ return int(x / 2**20)
37
+
38
+
39
+ # This context manager is used to track the peak memory usage of the process
40
+ class TorchTracemalloc:
41
+ def __enter__(self):
42
+ gc.collect()
43
+ if torch.cuda.is_available():
44
+ torch.cuda.empty_cache()
45
+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
46
+ self.begin = torch.cuda.memory_allocated()
47
+ elif is_mlu_available():
48
+ torch.mlu.empty_cache()
49
+ torch.mlu.reset_max_memory_allocated() # reset the peak gauge to zero
50
+ self.begin = torch.mlu.memory_allocated()
51
+ elif is_npu_available():
52
+ torch.npu.empty_cache()
53
+ torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero
54
+ self.begin = torch.npu.memory_allocated()
55
+ elif is_xpu_available():
56
+ torch.xpu.empty_cache()
57
+ torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero
58
+ self.begin = torch.xpu.memory_allocated()
59
+ return self
60
+
61
+ def __exit__(self, *exc):
62
+ gc.collect()
63
+ if torch.cuda.is_available():
64
+ torch.cuda.empty_cache()
65
+ self.end = torch.cuda.memory_allocated()
66
+ self.peak = torch.cuda.max_memory_allocated()
67
+ elif is_mlu_available():
68
+ torch.mlu.empty_cache()
69
+ self.end = torch.mlu.memory_allocated()
70
+ self.peak = torch.mlu.max_memory_allocated()
71
+ elif is_npu_available():
72
+ torch.npu.empty_cache()
73
+ self.end = torch.npu.memory_allocated()
74
+ self.peak = torch.npu.max_memory_allocated()
75
+ elif is_xpu_available():
76
+ torch.xpu.empty_cache()
77
+ self.end = torch.xpu.memory_allocated()
78
+ self.peak = torch.xpu.max_memory_allocated()
79
+ self.used = b2mb(self.end - self.begin)
80
+ self.peaked = b2mb(self.peak - self.begin)
81
+ # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
82
+
83
+
84
+ def get_dataloaders(
85
+ accelerator: Accelerator,
86
+ batch_size: int = 16,
87
+ model_name: str = "bert-base-cased",
88
+ n_train: int = 320,
89
+ n_val: int = 160,
90
+ ):
91
+ """
92
+ Creates a set of `DataLoader`s for the `glue` dataset.
93
+
94
+ Args:
95
+ accelerator (`Accelerator`):
96
+ An `Accelerator` object
97
+ batch_size (`int`, *optional*):
98
+ The batch size for the train and validation DataLoaders.
99
+ model_name (`str`, *optional*):
100
+ The name of the model to use.
101
+ n_train (`int`, *optional*):
102
+ The number of training examples to use.
103
+ n_val (`int`, *optional*):
104
+ The number of validation examples to use.
105
+ """
106
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
107
+ datasets = load_dataset(
108
+ "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
109
+ )
110
+
111
+ def tokenize_function(examples):
112
+ # max_length=None => use the model max length (it's actually the default)
113
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
114
+ return outputs
115
+
116
+ # Apply the method we just defined to all the examples in all the splits of the dataset
117
+ tokenized_datasets = datasets.map(
118
+ tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
119
+ )
120
+
121
+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
122
+ # transformers library
123
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
124
+
125
+ def collate_fn(examples):
126
+ # On TPU it's best to pad everything to the same length or training will be very slow.
127
+ if accelerator.distributed_type == DistributedType.XLA:
128
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
129
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
130
+
131
+ # Instantiate dataloaders.
132
+ train_dataloader = DataLoader(
133
+ tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
134
+ )
135
+ eval_dataloader = DataLoader(
136
+ tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
137
+ )
138
+
139
+ return train_dataloader, eval_dataloader
140
+
141
+
142
+ def training_function(config, args):
143
+ # Initialize accelerator
144
+ accelerator = Accelerator()
145
+
146
+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
147
+ lr = config["lr"]
148
+ num_epochs = int(config["num_epochs"])
149
+ seed = int(config["seed"])
150
+ batch_size = int(config["batch_size"])
151
+ model_name = args.model_name_or_path
152
+
153
+ set_seed(seed)
154
+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
155
+
156
+ # Instantiate the model (we build the model here so that the seed also controls the new weights initialization)
157
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
158
+
159
+ # Instantiate optimizer
160
+ optimizer_cls = (
161
+ AdamW
162
+ if accelerator.state.deepspeed_plugin is None
163
+ or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
164
+ else DummyOptim
165
+ )
166
+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)
167
+
168
+ if accelerator.state.deepspeed_plugin is not None:
169
+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
170
+ "gradient_accumulation_steps"
171
+ ]
172
+ else:
173
+ gradient_accumulation_steps = 1
174
+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
175
+
176
+ # Instantiate scheduler
177
+ if (
178
+ accelerator.state.deepspeed_plugin is None
179
+ or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
180
+ ):
181
+ lr_scheduler = get_linear_schedule_with_warmup(
182
+ optimizer=optimizer,
183
+ num_warmup_steps=0,
184
+ num_training_steps=max_training_steps,
185
+ )
186
+ else:
187
+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
188
+
189
+ # Prepare everything
190
+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
191
+ # prepare method.
192
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
193
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
194
+ )
195
+
196
+ # We need to keep track of how many total steps we have iterated over
197
+ overall_step = 0
198
+ # We also need to keep track of the starting epoch so files are named properly
199
+ starting_epoch = 0
200
+
201
+ # Now we train the model
202
+ train_total_peak_memory = {}
203
+ for epoch in range(starting_epoch, num_epochs):
204
+ with TorchTracemalloc() as tracemalloc:
205
+ model.train()
206
+ for step, batch in enumerate(train_dataloader):
207
+ outputs = model(**batch)
208
+ loss = outputs.loss
209
+ loss = loss / gradient_accumulation_steps
210
+ accelerator.backward(loss)
211
+ if step % gradient_accumulation_steps == 0:
212
+ optimizer.step()
213
+ lr_scheduler.step()
214
+ optimizer.zero_grad()
215
+
216
+ overall_step += 1
217
+
218
+ # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
219
+ accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
220
+ accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
221
+ accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
222
+ accelerator.print(
223
+ f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
224
+ )
225
+ train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
226
+ if args.peak_memory_upper_bound is not None:
227
+ assert (
228
+ train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
229
+ ), "Peak memory usage exceeded the upper bound"
230
+
231
+ accelerator.wait_for_everyone()
232
+ if accelerator.is_main_process:
233
+ with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
234
+ json.dump(train_total_peak_memory, f)
235
+
236
+
237
+ def main():
238
+ parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
239
+ parser.add_argument(
240
+ "--model_name_or_path",
241
+ type=str,
242
+ default="bert-base-cased",
243
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
244
+ required=False,
245
+ )
246
+ parser.add_argument(
247
+ "--output_dir",
248
+ type=str,
249
+ default=".",
250
+ help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
251
+ )
252
+ parser.add_argument(
253
+ "--peak_memory_upper_bound",
254
+ type=float,
255
+ default=None,
256
+ help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
257
+ )
258
+ parser.add_argument(
259
+ "--n_train",
260
+ type=int,
261
+ default=320,
262
+ help="Number of training examples to use.",
263
+ )
264
+ parser.add_argument(
265
+ "--n_val",
266
+ type=int,
267
+ default=160,
268
+ help="Number of validation examples to use.",
269
+ )
270
+ parser.add_argument(
271
+ "--num_epochs",
272
+ type=int,
273
+ default=1,
274
+ help="Number of train epochs.",
275
+ )
276
+ args = parser.parse_args()
277
+ config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
278
+ training_function(config, args)
279
+
280
+
281
+ if __name__ == "__main__":
282
+ main()
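The `TorchTracemalloc` context manager above is self-contained and can be reused outside the test; a small sketch of measuring one forward/backward pass, assuming a CUDA device (the context manager only tracks accelerator memory) and the module path shown in this diff:

import torch

# Importing from the test module pulls in its heavier dependencies (datasets, transformers).
from accelerate.test_utils.scripts.external_deps.test_peak_memory_usage import TorchTracemalloc

model = torch.nn.Linear(1024, 1024).cuda()

with TorchTracemalloc() as tracemalloc:
    x = torch.randn(64, 1024, device="cuda")
    model(x).sum().backward()

# `used` and `peaked` are MB deltas relative to the allocation measured on entry.
print(f"delta used: {tracemalloc.used} MB, delta peak: {tracemalloc.peaked} MB")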
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py ADDED
@@ -0,0 +1,243 @@
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import json
+import os
+
+import evaluate
+import torch
+from datasets import load_dataset
+from torch.optim import AdamW
+from torch.utils.data import DataLoader
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
+
+from accelerate import Accelerator, DistributedType
+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
+
+
+MAX_GPU_BATCH_SIZE = 16
+EVAL_BATCH_SIZE = 32
+
+
+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
+    """
+    Creates a set of `DataLoader`s for the `glue` dataset.
+
+    Args:
+        accelerator (`Accelerator`):
+            An `Accelerator` object
+        batch_size (`int`, *optional*):
+            The batch size for the train and validation DataLoaders.
+        model_name (`str`, *optional*):
+    """
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    datasets = load_dataset("glue", "mrpc")
+
+    def tokenize_function(examples):
+        # max_length=None => use the model max length (it's actually the default)
+        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
+        return outputs
+
+    # Apply the method we just defined to all the examples in all the splits of the dataset
+    tokenized_datasets = datasets.map(
+        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
+    )
+
+    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
+    # transformers library
+    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
+
+    def collate_fn(examples):
+        # On TPU it's best to pad everything to the same length or training will be very slow.
+        if accelerator.distributed_type == DistributedType.XLA:
+            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+
+    # Instantiate dataloaders.
+    train_dataloader = DataLoader(
+        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
+    )
+    eval_dataloader = DataLoader(
+        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
+    )
+
+    return train_dataloader, eval_dataloader
+
+
+def training_function(config, args):
+    # Initialize accelerator
+    accelerator = Accelerator()
+
+    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
+    lr = config["lr"]
+    num_epochs = int(config["num_epochs"])
+    seed = int(config["seed"])
+    batch_size = int(config["batch_size"])
+    model_name = args.model_name_or_path
+
+    set_seed(seed)
+    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
+
+    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
+    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
+
+    # Instantiate optimizer
+    optimizer_cls = (
+        AdamW
+        if accelerator.state.deepspeed_plugin is None
+        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
+        else DummyOptim
+    )
+    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
+
+    max_training_steps = len(train_dataloader) * num_epochs
+
+    # Instantiate scheduler
+    linear_decay_scheduler = False
+    if (
+        accelerator.state.deepspeed_plugin is None
+        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
+    ):
+        lr_scheduler = get_linear_schedule_with_warmup(
+            optimizer=optimizer,
+            num_warmup_steps=0,
+            num_training_steps=max_training_steps,
+        )
+        linear_decay_scheduler = True
+    else:
+        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
+
+    # Prepare everything
+    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
+    # prepare method.
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+
+    # We also need to keep track of the starting epoch so files are named properly
+    starting_epoch = 0
+
+    # Now we train the model
+    metric = evaluate.load("glue", "mrpc")
+    best_performance = 0
+    performance_metric = {}
+    expected_lr_after_first_optim_step = lr * (
+        1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps)
+    )
+    lr_scheduler_check_completed = False
+    for epoch in range(starting_epoch, num_epochs):
+        model.train()
+        for step, batch in enumerate(train_dataloader):
+            with accelerator.accumulate(model):
+                outputs = model(**batch)
+                loss = outputs.loss
+                accelerator.backward(loss)
+                optimizer.step()
+                lr_scheduler.step()
+                optimizer.zero_grad()
+
+            # assert the learning rate after first optimizer step
+            if (
+                accelerator.sync_gradients
+                and not lr_scheduler_check_completed
+                and linear_decay_scheduler
+                and accelerator.state.mixed_precision == "no"
+            ):
+                assert (
+                    lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step
+                ), f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got {lr_scheduler.get_last_lr()[0]}"
+                lr_scheduler_check_completed = True
+
+        model.eval()
+        samples_seen = 0
+        for step, batch in enumerate(eval_dataloader):
+            # We could avoid this line since we set the accelerator with `device_placement=True`.
+            batch.to(accelerator.device)
+            with torch.no_grad():
+                outputs = model(**batch)
+            predictions = outputs.logits.argmax(dim=-1)
+            # It is slightly faster to call this once, than multiple times
+            predictions, references = accelerator.gather(
+                (predictions, batch["labels"])
+            )  # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.use_distributed:
+                if step == len(eval_dataloader) - 1:
+                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+                    references = references[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += references.shape[0]
+            metric.add_batch(
+                predictions=predictions,
+                references=references,
+            )
+
+        eval_metric = metric.compute()
+        # Use accelerator.print to print only on the main process.
+        accelerator.print(f"epoch {epoch}:", eval_metric)
+        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
+
+        if best_performance < eval_metric["accuracy"]:
+            best_performance = eval_metric["accuracy"]
+
+    # check that the LR is 0
+    if linear_decay_scheduler and accelerator.state.mixed_precision == "no":
+        assert (
+            lr_scheduler.get_last_lr()[0] == 0
+        ), f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}"
+
+    if args.performance_lower_bound is not None:
+        assert (
+            args.performance_lower_bound <= best_performance
+        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
+
+    accelerator.wait_for_everyone()
+    if accelerator.is_main_process:
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump(performance_metric, f)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Simple example of a training script that tracks model performance on the GLUE MRPC task.")
+    parser.add_argument(
+        "--model_name_or_path",
+        type=str,
+        default="bert-base-cased",
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+        required=False,
+    )
+    parser.add_argument(
+        "--output_dir",
+        type=str,
+        default=".",
+        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
+    )
+    parser.add_argument(
+        "--performance_lower_bound",
+        type=float,
+        default=None,
+        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
+    )
+    parser.add_argument(
+        "--num_epochs",
+        type=int,
+        default=3,
+        help="Number of train epochs.",
+    )
+    args = parser.parse_args()
+    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
+    training_function(config, args)
+
+
+if __name__ == "__main__":
+    main()
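The expected_lr_after_first_optim_step assertion in the script above relies on a warmup-free linear schedule decaying the learning rate by lr / total_steps per optimizer step. A minimal sketch of that behaviour on a dummy optimizer; the single zero parameter and the 100-step horizon are illustrative assumptions:

import torch
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup

lr = 2e-5
total_steps = 100  # stands in for max_training_steps / num_processes / gradient_accumulation_steps
optimizer = AdamW([torch.nn.Parameter(torch.zeros(1))], lr=lr)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

optimizer.step()  # a no-op here (no gradients), but keeps the step ordering PyTorch expects
scheduler.step()
# After one optimizer step the linear schedule has decayed the LR by lr / total_steps.
assert abs(scheduler.get_last_lr()[0] - lr * (1 - 1 / total_steps)) < 1e-12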
llmeval-env/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py ADDED
@@ -0,0 +1,129 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+from torchvision.models import resnet34
+from transformers import (
+    BertConfig,
+    BertForMaskedLM,
+    GPT2Config,
+    GPT2ForSequenceClassification,
+    T5Config,
+    T5ForConditionalGeneration,
+)
+
+from accelerate import PartialState
+from accelerate.inference import prepare_pippy
+from accelerate.utils import DistributedType, send_to_device, set_seed
+
+
+model_to_config = {
+    "t5": (T5ForConditionalGeneration, T5Config, 1024),
+    "bert": (BertForMaskedLM, BertConfig, 512),
+    "gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024),
+}
+
+
+def get_model_and_data_for_text(model_name, device, num_processes: int = 2):
+    initializer, config, seq_len = model_to_config[model_name]
+    config_args = {}
+    # Eventually needed for batch inference tests on gpt-2 when bs != 1
+    # if model_name == "gpt2":
+    #     config_args["pad_token_id"] = 0
+    model_config = config(**config_args)
+    model = initializer(model_config)
+    return model, torch.randint(
+        low=0,
+        high=model_config.vocab_size,
+        size=(num_processes, seq_len),
+        device=device,
+        dtype=torch.int64,
+        requires_grad=False,
+    )
+
+
+def test_gpt2(batch_size: int = 2):
+    set_seed(42)
+    state = PartialState()
+    model, inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size)
+    model = prepare_pippy(model, example_args=(inputs,), no_split_module_classes=model._no_split_modules)
+    # For inference args need to be a tuple
+    inputs = inputs.to("cuda")
+    with torch.no_grad():
+        output = model(inputs)
+    # Zach: Check that we just grab the real outputs we need at the end
+    if not state.is_last_process:
+        assert output is None, "Output was not generated on just the last process!"
+    else:
+        assert output is not None, "Output was not generated in the last process!"
+
+
+def test_t5(batch_size: int = 2):
+    set_seed(42)
+    state = PartialState()
+    model, inputs = get_model_and_data_for_text("t5", "cpu", batch_size)
+    example_inputs = {"input_ids": inputs, "decoder_input_ids": inputs}
+    model = prepare_pippy(
+        model,
+        no_split_module_classes=model._no_split_modules,
+        example_kwargs=example_inputs,
+    )
+    # For inference args need to be a tuple
+    inputs = send_to_device(example_inputs, "cuda:0")
+    with torch.no_grad():
+        output = model(*inputs.values())
+    # Zach: Check that we just grab the real outputs we need at the end
+    if not state.is_last_process:
+        assert output is None, "Output was not generated on just the last process!"
+    else:
+        assert output is not None, "Output was not generated in the last process!"
+
+
+def test_resnet(batch_size: int = 2):
+    set_seed(42)
+    state = PartialState()
+    model = resnet34()
+    input_tensor = torch.rand(batch_size, 3, 224, 224)
+    model = prepare_pippy(
+        model,
+        example_args=(input_tensor,),
+    )
+    inputs = send_to_device(input_tensor, "cuda:0")
+    with torch.no_grad():
+        output = model(inputs)
+    # Zach: Check that we just grab the real outputs we need at the end
+    if not state.is_last_process:
+        assert output is None, "Output was not generated on just the last process!"
+    else:
+        assert output is not None, "Output was not generated in the last process!"
+
+
+if __name__ == "__main__":
+    state = PartialState()
+    state.print("Testing pippy integration...")
+    if state.distributed_type == DistributedType.MULTI_GPU:
+        state.print("Testing GPT2...")
+        test_gpt2()
+        # Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue
+        # due to references
+        # NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope
+        # test_gpt2(3)
+        state.print("Testing T5...")
+        test_t5()
+        test_t5(1)
+        test_t5(3)
+        state.print("Testing CV model...")
+        test_resnet()
+        test_resnet(3)
+    else:
+        print("Less than two GPUs found, not running tests!")