applied-ai-018 committed on
Commit 1340f33 (verified) · 1 parent: 2ad4264

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/accelerate/__init__.py +48 -0
  2. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/accelerate/accelerator.py +0 -0
  18. llmeval-env/lib/python3.10/site-packages/accelerate/big_modeling.py +627 -0
  19. llmeval-env/lib/python3.10/site-packages/accelerate/checkpointing.py +273 -0
  20. llmeval-env/lib/python3.10/site-packages/accelerate/commands/__init__.py +13 -0
  21. llmeval-env/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py +50 -0
  22. llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py +267 -0
  23. llmeval-env/lib/python3.10/site-packages/accelerate/commands/env.py +109 -0
  24. llmeval-env/lib/python3.10/site-packages/accelerate/commands/estimate.py +309 -0
  25. llmeval-env/lib/python3.10/site-packages/accelerate/commands/launch.py +1092 -0
  26. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py +14 -0
  27. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py +65 -0
  34. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py +59 -0
  35. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/input.py +86 -0
  36. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py +133 -0
  37. llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py +144 -0
  38. llmeval-env/lib/python3.10/site-packages/accelerate/commands/test.py +65 -0
  39. llmeval-env/lib/python3.10/site-packages/accelerate/commands/tpu.py +157 -0
  40. llmeval-env/lib/python3.10/site-packages/accelerate/commands/utils.py +120 -0
  41. llmeval-env/lib/python3.10/site-packages/accelerate/data_loader.py +1149 -0
  42. llmeval-env/lib/python3.10/site-packages/accelerate/hooks.py +709 -0
  43. llmeval-env/lib/python3.10/site-packages/accelerate/inference.py +188 -0
  44. llmeval-env/lib/python3.10/site-packages/accelerate/launchers.py +258 -0
  45. llmeval-env/lib/python3.10/site-packages/accelerate/local_sgd.py +102 -0
  46. llmeval-env/lib/python3.10/site-packages/accelerate/logging.py +123 -0
  47. llmeval-env/lib/python3.10/site-packages/accelerate/memory_utils.py +22 -0
  48. llmeval-env/lib/python3.10/site-packages/accelerate/optimizer.py +214 -0
  49. llmeval-env/lib/python3.10/site-packages/accelerate/scheduler.py +98 -0
  50. llmeval-env/lib/python3.10/site-packages/accelerate/state.py +1208 -0
llmeval-env/lib/python3.10/site-packages/accelerate/__init__.py ADDED
@@ -0,0 +1,48 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ __version__ = "0.30.0"
+
+ from .accelerator import Accelerator
+ from .big_modeling import (
+     cpu_offload,
+     cpu_offload_with_hook,
+     disk_offload,
+     dispatch_model,
+     init_empty_weights,
+     init_on_device,
+     load_checkpoint_and_dispatch,
+ )
+ from .data_loader import skip_first_batches
+ from .inference import prepare_pippy
+ from .launchers import debug_launcher, notebook_launcher
+ from .state import PartialState
+ from .utils import (
+     AutocastKwargs,
+     DataLoaderConfiguration,
+     DeepSpeedPlugin,
+     DistributedDataParallelKwargs,
+     DistributedType,
+     FullyShardedDataParallelPlugin,
+     GradScalerKwargs,
+     InitProcessGroupKwargs,
+     find_executable_batch_size,
+     infer_auto_device_map,
+     is_rich_available,
+     load_checkpoint_in_model,
+     synchronize_rng_states,
+ )
+
+
+ if is_rich_available():
+     from .utils import rich
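
The re-exports above are the package's public API. For orientation, a minimal sketch of the usual `Accelerator` training loop built only from names exported here (illustrative code, not part of this commit; assumes a plain single-process run):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # detects device / distributed setup from the environment

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
batches = [(torch.randn(4, 8), torch.randint(0, 2, (4,))) for _ in range(10)]

# prepare() wraps the objects for the current setup (DDP, mixed precision, ...).
model, optimizer = accelerator.prepare(model, optimizer)

for x, y in batches:
    x, y = x.to(accelerator.device), y.to(accelerator.device)
    loss = torch.nn.functional.cross_entropy(model(x), y)
    accelerator.backward(loss)  # used in place of loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```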
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.17 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc ADDED
Binary file (111 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc ADDED
Binary file (23.2 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc ADDED
Binary file (8.2 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc ADDED
Binary file (35 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (22.8 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc ADDED
Binary file (6.09 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc ADDED
Binary file (8.06 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc ADDED
Binary file (3.62 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc ADDED
Binary file (4.44 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc ADDED
Binary file (436 Bytes).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (7.48 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (3.35 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc ADDED
Binary file (39.7 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc ADDED
Binary file (37.3 kB).
 
llmeval-env/lib/python3.10/site-packages/accelerate/accelerator.py ADDED
The diff for this file is too large to render.
 
llmeval-env/lib/python3.10/site-packages/accelerate/big_modeling.py ADDED
@@ -0,0 +1,627 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import os
17
+ from contextlib import contextmanager
18
+ from functools import wraps
19
+ from typing import Dict, List, Optional, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+
24
+ from .hooks import (
25
+ AlignDevicesHook,
26
+ CpuOffload,
27
+ UserCpuOffloadHook,
28
+ add_hook_to_module,
29
+ attach_align_device_hook,
30
+ attach_align_device_hook_on_blocks,
31
+ )
32
+ from .utils import (
33
+ OffloadedWeightsLoader,
34
+ check_cuda_p2p_ib_support,
35
+ check_device_map,
36
+ extract_submodules_state_dict,
37
+ find_tied_parameters,
38
+ get_balanced_memory,
39
+ infer_auto_device_map,
40
+ is_mlu_available,
41
+ is_npu_available,
42
+ is_torch_version,
43
+ is_xpu_available,
44
+ load_checkpoint_in_model,
45
+ offload_state_dict,
46
+ parse_flag_from_env,
47
+ retie_parameters,
48
+ )
49
+ from .utils.other import recursive_getattr
50
+
51
+
52
+ logger = logging.getLogger(__name__)
53
+
54
+
55
+ @contextmanager
56
+ def init_empty_weights(include_buffers: bool = None):
57
+ """
58
+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an
59
+ empty model. Useful when just initializing the model would blow the available RAM.
60
+
61
+ Args:
62
+ include_buffers (`bool`, *optional*):
63
+ Whether or not to also put all buffers on the meta device while initializing.
64
+
65
+ Example:
66
+
67
+ ```python
68
+ import torch.nn as nn
69
+ from accelerate import init_empty_weights
70
+
71
+ # Initialize a model with 100 billion parameters in no time and without using any RAM.
72
+ with init_empty_weights():
73
+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
74
+ ```
75
+
76
+ <Tip warning={true}>
77
+
78
+ Any model created under this context manager has no weights. As such you can't do something like
79
+ `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
80
+ Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
81
+ called.
82
+
83
+ </Tip>
84
+ """
85
+ if include_buffers is None:
86
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
87
+ with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
88
+ yield f
89
+
90
+
91
+ @contextmanager
92
+ def init_on_device(device: torch.device, include_buffers: bool = None):
93
+ """
94
+ A context manager under which models are initialized with all parameters on the specified device.
95
+
96
+ Args:
97
+ device (`torch.device`):
98
+ Device to initialize all parameters on.
99
+ include_buffers (`bool`, *optional*):
100
+ Whether or not to also put all buffers on the meta device while initializing.
101
+
102
+ Example:
103
+
104
+ ```python
105
+ import torch.nn as nn
106
+ from accelerate import init_on_device
107
+
108
+ with init_on_device(device=torch.device("cuda")):
109
+ tst = nn.Linear(100, 100)  # on `cuda` device
110
+ ```
111
+ """
112
+ if include_buffers is None:
113
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
114
+
115
+ # TODO(shingjan): remove the torch version check once older versions are deprecated
116
+ if is_torch_version(">=", "2.0") and include_buffers:
117
+ with device:
118
+ yield
119
+ return
120
+
121
+ old_register_parameter = nn.Module.register_parameter
122
+ if include_buffers:
123
+ old_register_buffer = nn.Module.register_buffer
124
+
125
+ def register_empty_parameter(module, name, param):
126
+ old_register_parameter(module, name, param)
127
+ if param is not None:
128
+ param_cls = type(module._parameters[name])
129
+ kwargs = module._parameters[name].__dict__
130
+ kwargs["requires_grad"] = param.requires_grad
131
+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
132
+
133
+ def register_empty_buffer(module, name, buffer, persistent=True):
134
+ old_register_buffer(module, name, buffer, persistent=persistent)
135
+ if buffer is not None:
136
+ module._buffers[name] = module._buffers[name].to(device)
137
+
138
+ # Patch tensor creation
139
+ if include_buffers:
140
+ tensor_constructors_to_patch = {
141
+ torch_function_name: getattr(torch, torch_function_name)
142
+ for torch_function_name in ["empty", "zeros", "ones", "full"]
143
+ }
144
+ else:
145
+ tensor_constructors_to_patch = {}
146
+
147
+ def patch_tensor_constructor(fn):
148
+ def wrapper(*args, **kwargs):
149
+ kwargs["device"] = device
150
+ return fn(*args, **kwargs)
151
+
152
+ return wrapper
153
+
154
+ try:
155
+ nn.Module.register_parameter = register_empty_parameter
156
+ if include_buffers:
157
+ nn.Module.register_buffer = register_empty_buffer
158
+ for torch_function_name in tensor_constructors_to_patch.keys():
159
+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
160
+ yield
161
+ finally:
162
+ nn.Module.register_parameter = old_register_parameter
163
+ if include_buffers:
164
+ nn.Module.register_buffer = old_register_buffer
165
+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
166
+ setattr(torch, torch_function_name, old_torch_function)
167
+
168
+
169
+ def cpu_offload(
170
+ model: nn.Module,
171
+ execution_device: Optional[torch.device] = None,
172
+ offload_buffers: bool = False,
173
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
174
+ preload_module_classes: Optional[List[str]] = None,
175
+ ):
176
+ """
177
+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
178
+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
179
+ state dict and put on the execution device passed as they are needed, then offloaded again.
180
+
181
+ Args:
182
+ model (`torch.nn.Module`):
183
+ The model to offload.
184
+ execution_device (`torch.device`, *optional*):
185
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
186
+ model's first parameter device.
187
+ offload_buffers (`bool`, *optional*, defaults to `False`):
188
+ Whether or not to offload the buffers with the model parameters.
189
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
190
+ The state dict of the model that will be kept on CPU.
191
+ preload_module_classes (`List[str]`, *optional*):
192
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
193
+ of the forward. This should only be used for classes that have submodules which are registered but not
194
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
195
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
196
+ """
197
+ if execution_device is None:
198
+ execution_device = next(iter(model.parameters())).device
199
+ if state_dict is None:
200
+ state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
201
+
202
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
203
+ attach_align_device_hook(
204
+ model,
205
+ execution_device=execution_device,
206
+ offload=True,
207
+ offload_buffers=offload_buffers,
208
+ weights_map=state_dict,
209
+ preload_module_classes=preload_module_classes,
210
+ )
211
+
212
+ return model
213
+
214
+
215
+ def cpu_offload_with_hook(
216
+ model: torch.nn.Module,
217
+ execution_device: Optional[Union[int, str, torch.device]] = None,
218
+ prev_module_hook: Optional[UserCpuOffloadHook] = None,
219
+ ):
220
+ """
221
+ Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
222
+ [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
223
+ the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
224
+
225
+ Args:
226
+ model (`torch.nn.Module`):
227
+ The model to offload.
228
+ execution_device (`str`, `int` or `torch.device`, *optional*):
229
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
230
+ GPU 0 if there is a GPU, and finally to the CPU.
231
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
232
+ The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
233
+ offload method will be called just before the forward of the model to which this hook is attached.
234
+
235
+ Example:
236
+
237
+ ```py
238
+ model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
239
+ model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
240
+ model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
241
+
242
+ hid_1 = model_1(input)
243
+ for i in range(50):
244
+ # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
245
+ hid_2 = model_2(hid_1)
246
+ # model2 is offloaded to the CPU just before this forward.
247
+ hid_3 = model_3(hid_2)
248
+
249
+ # For model3, you need to manually call the hook offload method.
250
+ hook_3.offload()
251
+ ```
252
+ """
253
+ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
254
+ add_hook_to_module(model, hook, append=True)
255
+ user_hook = UserCpuOffloadHook(model, hook)
256
+ return model, user_hook
257
+
258
+
259
+ def disk_offload(
260
+ model: nn.Module,
261
+ offload_dir: Union[str, os.PathLike],
262
+ execution_device: Optional[torch.device] = None,
263
+ offload_buffers: bool = False,
264
+ preload_module_classes: Optional[List[str]] = None,
265
+ ):
266
+ """
267
+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
268
+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
269
+ put on the execution device passed as they are needed, then offloaded again.
270
+
271
+ Args:
272
+ model (`torch.nn.Module`): The model to offload.
273
+ offload_dir (`str` or `os.PathLike`):
274
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
275
+ execution_device (`torch.device`, *optional*):
276
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
277
+ model's first parameter device.
278
+ offload_buffers (`bool`, *optional*, defaults to `False`):
279
+ Whether or not to offload the buffers with the model parameters.
280
+ preload_module_classes (`List[str]`, *optional*):
281
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
282
+ of the forward. This should only be used for classes that have submodules which are registered but not
283
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
284
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
285
+ """
286
+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
287
+ offload_state_dict(offload_dir, model.state_dict())
288
+ if execution_device is None:
289
+ execution_device = next(iter(model.parameters())).device
290
+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
291
+
292
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
293
+ attach_align_device_hook(
294
+ model,
295
+ execution_device=execution_device,
296
+ offload=True,
297
+ offload_buffers=offload_buffers,
298
+ weights_map=weights_map,
299
+ preload_module_classes=preload_module_classes,
300
+ )
301
+
302
+ return model
303
+
304
+
305
+ def dispatch_model(
306
+ model: nn.Module,
307
+ device_map: Dict[str, Union[str, int, torch.device]],
308
+ main_device: Optional[torch.device] = None,
309
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
310
+ offload_dir: Optional[Union[str, os.PathLike]] = None,
311
+ offload_index: Optional[Dict[str, str]] = None,
312
+ offload_buffers: bool = False,
313
+ skip_keys: Optional[Union[str, List[str]]] = None,
314
+ preload_module_classes: Optional[List[str]] = None,
315
+ force_hooks: bool = False,
316
+ ):
317
+ """
318
+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
319
+ the CPU or even the disk.
320
+
321
+ Args:
322
+ model (`torch.nn.Module`):
323
+ The model to dispatch.
324
+ device_map (`Dict[str, Union[str, int, torch.device]]`):
325
+ A dictionary mapping module names in the model's `state_dict` to the device they should go to. Note that
326
+ `"disk"` is accepted even if it's not a proper value for `torch.device`.
327
+ main_device (`str`, `int` or `torch.device`, *optional*):
328
+ The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
329
+ `"disk"`.
330
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
331
+ The state dict of the part of the model that will be kept on CPU.
332
+ offload_dir (`str` or `os.PathLike`):
333
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
334
+ offload_index (`Dict`, *optional*):
335
+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
336
+ to the index saved in `save_folder`.
337
+ offload_buffers (`bool`, *optional*, defaults to `False`):
338
+ Whether or not to offload the buffers with the model parameters.
339
+ skip_keys (`str` or `List[str]`, *optional*):
340
+ A list of keys to ignore when moving inputs or outputs between devices.
341
+ preload_module_classes (`List[str]`, *optional*):
342
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
343
+ of the forward. This should only be used for classes that have submodules which are registered but not
344
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
345
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
346
+ force_hooks (`bool`, *optional*, defaults to `False`):
347
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
348
+ single device.
349
+ """
350
+ # Error early if the device map is incomplete.
351
+ check_device_map(model, device_map)
352
+
353
+ # for backward compatibility
354
+ is_bnb_quantized = (
355
+ getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
356
+ ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
357
+
358
+ # We attach hooks if the device_map has at least 2 different devices or if
359
+ # force_hooks is set to `True`. Otherwise, the model is already loaded
360
+ # in the unique device and the user can decide where to dispatch the model.
361
+ # If the model is quantized, we always force-dispatch the model
362
+ if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
363
+ if main_device is None:
364
+ if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
365
+ main_device = "cpu"
366
+ else:
367
+ main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
368
+
369
+ if main_device != "cpu":
370
+ cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
371
+ if state_dict is None and len(cpu_modules) > 0:
372
+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
373
+
374
+ disk_modules = [name for name, device in device_map.items() if device == "disk"]
375
+ if offload_dir is None and offload_index is None and len(disk_modules) > 0:
376
+ raise ValueError(
377
+ "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
378
+ f"need to be offloaded: {', '.join(disk_modules)}."
379
+ )
380
+ if (
381
+ len(disk_modules) > 0
382
+ and offload_index is None
383
+ and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
384
+ ):
385
+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
386
+ offload_state_dict(offload_dir, disk_state_dict)
387
+
388
+ execution_device = {
389
+ name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
390
+ }
391
+ execution_device[""] = main_device
392
+ offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
393
+ offload = {name: device in offloaded_devices for name, device in device_map.items()}
394
+ save_folder = offload_dir if len(disk_modules) > 0 else None
395
+ if state_dict is not None or save_folder is not None or offload_index is not None:
396
+ device = main_device if offload_index is not None else None
397
+ weights_map = OffloadedWeightsLoader(
398
+ state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
399
+ )
400
+ else:
401
+ weights_map = None
402
+
403
+ # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
404
+ # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
405
+ # original pointer) on each device.
406
+ tied_params = find_tied_parameters(model)
407
+
408
+ tied_params_map = {}
409
+ for group in tied_params:
410
+ for param_name in group:
411
+ # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
412
+ # to care about views of tensors through storage_offset.
413
+ data_ptr = recursive_getattr(model, param_name).data_ptr()
414
+ tied_params_map[data_ptr] = {}
415
+
416
+ # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
417
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
418
+
419
+ attach_align_device_hook_on_blocks(
420
+ model,
421
+ execution_device=execution_device,
422
+ offload=offload,
423
+ offload_buffers=offload_buffers,
424
+ weights_map=weights_map,
425
+ skip_keys=skip_keys,
426
+ preload_module_classes=preload_module_classes,
427
+ tied_params_map=tied_params_map,
428
+ )
429
+
430
+ # warn if there is any params on the meta device
431
+ offloaded_devices_str = " and ".join(
432
+ [device for device in set(device_map.values()) if device in ("cpu", "disk")]
433
+ )
434
+ if len(offloaded_devices_str) > 0:
435
+ logging.warning(
436
+ f"Some parameters are on the meta device device because they were offloaded to the {offloaded_devices_str}."
437
+ )
438
+
439
+ # Attaching the hook may break tied weights, so we retie them
440
+ retie_parameters(model, tied_params)
441
+
442
+ # add warning to cuda and to method
443
+ def add_warning(fn, model):
444
+ @wraps(fn)
445
+ def wrapper(*args, **kwargs):
446
+ warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
447
+ if str(fn.__name__) == "to":
448
+ to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
449
+ if to_device is not None:
450
+ logger.warning(warning_msg)
451
+ else:
452
+ logger.warning(warning_msg)
453
+ for param in model.parameters():
454
+ if param.device == torch.device("meta"):
455
+ raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
456
+ return fn(*args, **kwargs)
457
+
458
+ return wrapper
459
+
460
+ model.to = add_warning(model.to, model)
461
+ if is_npu_available():
462
+ model.npu = add_warning(model.npu, model)
463
+ elif is_mlu_available():
464
+ model.mlu = add_warning(model.mlu, model)
465
+ elif is_xpu_available():
466
+ model.xpu = add_warning(model.xpu, model)
467
+ else:
468
+ model.cuda = add_warning(model.cuda, model)
469
+
470
+ # Check if we are using multi-gpus with RTX 4000 series
471
+ use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
472
+ if use_multi_gpu and not check_cuda_p2p_ib_support():
473
+ logger.warning(
474
+ "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
475
+ "This can affect the multi-gpu inference when using accelerate device_map."
476
+ "Please make sure to update your driver to the latest version which resolves this."
477
+ )
478
+ else:
479
+ device = list(device_map.values())[0]
480
+ # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
481
+ if is_npu_available() and isinstance(device, int):
482
+ device = f"npu:{device}"
483
+ elif is_mlu_available() and isinstance(device, int):
484
+ device = f"mlu:{device}"
485
+ elif is_xpu_available() and isinstance(device, int):
486
+ device = f"xpu:{device}"
487
+ if device != "disk":
488
+ model.to(device)
489
+ else:
490
+ raise ValueError(
491
+ "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
492
+ )
493
+ # Convert OrderedDict back to dict for easier usage
494
+ model.hf_device_map = dict(device_map)
495
+ return model
496
+
497
+
498
+ def load_checkpoint_and_dispatch(
499
+ model: nn.Module,
500
+ checkpoint: Union[str, os.PathLike],
501
+ device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
502
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
503
+ no_split_module_classes: Optional[List[str]] = None,
504
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
505
+ offload_buffers: bool = False,
506
+ dtype: Optional[Union[str, torch.dtype]] = None,
507
+ offload_state_dict: Optional[bool] = None,
508
+ skip_keys: Optional[Union[str, List[str]]] = None,
509
+ preload_module_classes: Optional[List[str]] = None,
510
+ force_hooks: bool = False,
511
+ strict: bool = False,
512
+ ):
513
+ """
514
+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
515
+ loaded and adds the various hooks that will make this model run properly (even if split across devices).
516
+
517
+ Args:
518
+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.
519
+ checkpoint (`str` or `os.PathLike`):
520
+ The checkpoint to load. It can be:
521
+ - a path to a file containing a whole model state dict
522
+ - a path to a `.json` file containing the index to a sharded checkpoint
523
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
524
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
525
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
526
+ name, once a given module name is inside, every submodule of it will be sent to the same device.
527
+
528
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
529
+ information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map).
530
+ Defaults to None, which means [`dispatch_model`] will not be called.
531
+ max_memory (`Dict`, *optional*):
532
+ A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available for each GPU
533
+ and the available CPU RAM if unset.
534
+ no_split_module_classes (`List[str]`, *optional*):
535
+ A list of layer class names that should never be split across devices (for instance any layer that has a
536
+ residual connection).
537
+ offload_folder (`str` or `os.PathLike`, *optional*):
538
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
539
+ offload_buffers (`bool`, *optional*, defaults to `False`):
540
+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
541
+ well as the parameters.
542
+ dtype (`str` or `torch.dtype`, *optional*):
543
+ If provided, the weights will be converted to that type when loaded.
544
+ offload_state_dict (`bool`, *optional*):
545
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
546
+ the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map
547
+ picked contains `"disk"` values.
548
+ skip_keys (`str` or `List[str]`, *optional*):
549
+ A list of keys to ignore when moving inputs or outputs between devices.
550
+ preload_module_classes (`List[str]`, *optional*):
551
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
552
+ of the forward. This should only be used for classes that have submodules which are registered but not
553
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
554
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
555
+ force_hooks (`bool`, *optional*, defaults to `False`):
556
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
557
+ single device.
558
+ strict (`bool`, *optional*, defaults to `False`):
559
+ Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
560
+ state_dict.
561
+
562
+ Example:
563
+
564
+ ```python
565
+ >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
566
+ >>> from huggingface_hub import hf_hub_download
567
+ >>> from transformers import AutoConfig, AutoModelForCausalLM
568
+
569
+ >>> # Download the Weights
570
+ >>> checkpoint = "EleutherAI/gpt-j-6B"
571
+ >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
572
+
573
+ >>> # Create a model and initialize it with empty weights
574
+ >>> config = AutoConfig.from_pretrained(checkpoint)
575
+ >>> with init_empty_weights():
576
+ ... model = AutoModelForCausalLM.from_config(config)
577
+
578
+ >>> # Load the checkpoint and dispatch it to the right devices
579
+ >>> model = load_checkpoint_and_dispatch(
580
+ ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
581
+ ... )
582
+ ```
583
+ """
584
+ if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
585
+ raise ValueError(
586
+ "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
587
+ "'sequential'."
588
+ )
589
+ if isinstance(device_map, str):
590
+ if device_map != "sequential":
591
+ max_memory = get_balanced_memory(
592
+ model,
593
+ max_memory=max_memory,
594
+ no_split_module_classes=no_split_module_classes,
595
+ dtype=dtype,
596
+ low_zero=(device_map == "balanced_low_0"),
597
+ )
598
+ device_map = infer_auto_device_map(
599
+ model,
600
+ max_memory=max_memory,
601
+ no_split_module_classes=no_split_module_classes,
602
+ dtype=dtype,
603
+ offload_buffers=offload_buffers,
604
+ )
605
+ if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
606
+ offload_state_dict = True
607
+ load_checkpoint_in_model(
608
+ model,
609
+ checkpoint,
610
+ device_map=device_map,
611
+ offload_folder=offload_folder,
612
+ dtype=dtype,
613
+ offload_state_dict=offload_state_dict,
614
+ offload_buffers=offload_buffers,
615
+ strict=strict,
616
+ )
617
+ if device_map is None:
618
+ return model
619
+ return dispatch_model(
620
+ model,
621
+ device_map=device_map,
622
+ offload_dir=offload_folder,
623
+ offload_buffers=offload_buffers,
624
+ skip_keys=skip_keys,
625
+ preload_module_classes=preload_module_classes,
626
+ force_hooks=force_hooks,
627
+ )
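
Together, the helpers in `big_modeling.py` implement the big-model-inference flow described in the `load_checkpoint_and_dispatch` docstring: build the module skeleton under `init_empty_weights`, then load weights and dispatch submodules across devices. A hedged, self-contained sketch (the toy model and the `toy_checkpoint.bin` path are illustrative, not from this commit):

```python
import torch
import torch.nn as nn
from accelerate import init_empty_weights, load_checkpoint_and_dispatch

# Save a small real checkpoint to disk so the example is self-contained.
full_model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))
torch.save(full_model.state_dict(), "toy_checkpoint.bin")

# Build the same skeleton without allocating real storage (parameters land on the meta device).
with init_empty_weights():
    model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))

# Load the weights back and let Accelerate place each submodule; "auto" asks
# infer_auto_device_map() for a GPU/CPU/disk split and falls back to CPU-only machines.
model = load_checkpoint_and_dispatch(model, "toy_checkpoint.bin", device_map="auto")
print(model.hf_device_map)  # filled in by dispatch_model() above
```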
llmeval-env/lib/python3.10/site-packages/accelerate/checkpointing.py ADDED
@@ -0,0 +1,273 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import random
16
+ from pathlib import Path
17
+ from typing import List
18
+
19
+ import numpy as np
20
+ import torch
21
+ from safetensors.torch import load_file
22
+ from torch.cuda.amp import GradScaler
23
+
24
+ from .utils import (
25
+ MODEL_NAME,
26
+ OPTIMIZER_NAME,
27
+ RNG_STATE_NAME,
28
+ SAFE_MODEL_NAME,
29
+ SAFE_WEIGHTS_NAME,
30
+ SAMPLER_NAME,
31
+ SCALER_NAME,
32
+ SCHEDULER_NAME,
33
+ WEIGHTS_NAME,
34
+ get_pretty_name,
35
+ is_torch_xla_available,
36
+ is_xpu_available,
37
+ save,
38
+ )
39
+
40
+
41
+ if is_torch_xla_available():
42
+ import torch_xla.core.xla_model as xm
43
+
44
+ from .logging import get_logger
45
+ from .state import PartialState
46
+
47
+
48
+ logger = get_logger(__name__)
49
+
50
+
51
+ def save_accelerator_state(
52
+ output_dir: str,
53
+ model_states: List[dict],
54
+ optimizers: list,
55
+ schedulers: list,
56
+ dataloaders: list,
57
+ process_index: int,
58
+ scaler: GradScaler = None,
59
+ save_on_each_node: bool = False,
60
+ safe_serialization: bool = True,
61
+ ):
62
+ """
63
+ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
64
+
65
+ <Tip>
66
+
67
+ If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
68
+ `pickle`.
69
+
70
+ </Tip>
71
+
72
+ Args:
73
+ output_dir (`str` or `os.PathLike`):
74
+ The name of the folder to save all relevant weights and states.
75
+ model_states (`List[torch.nn.Module]`):
76
+ A list of model states
77
+ optimizers (`List[torch.optim.Optimizer]`):
78
+ A list of optimizer instances
79
+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
80
+ A list of learning rate schedulers
81
+ dataloaders (`List[torch.utils.data.DataLoader]`):
82
+ A list of dataloader instances to save their sampler states
83
+ process_index (`int`):
84
+ The current process index in the Accelerator state
85
+ scaler (`torch.cuda.amp.GradScaler`, *optional*):
86
+ An optional gradient scaler instance to save
87
+ save_on_each_node (`bool`, *optional*):
88
+ Whether to save on every node, or only the main node.
89
+ safe_serialization (`bool`, *optional*, defaults to `True`):
90
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
91
+ """
92
+ output_dir = Path(output_dir)
93
+ # Model states
94
+ for i, state in enumerate(model_states):
95
+ weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
96
+ if i > 0:
97
+ weights_name = weights_name.replace(".", f"_{i}.")
98
+ output_model_file = output_dir.joinpath(weights_name)
99
+ save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
100
+ logger.info(f"Model weights saved in {output_model_file}")
101
+ # Optimizer states
102
+ for i, opt in enumerate(optimizers):
103
+ state = opt.state_dict()
104
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
105
+ output_optimizer_file = output_dir.joinpath(optimizer_name)
106
+ save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
107
+ logger.info(f"Optimizer state saved in {output_optimizer_file}")
108
+ # Scheduler states
109
+ for i, scheduler in enumerate(schedulers):
110
+ state = scheduler.state_dict()
111
+ scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
112
+ output_scheduler_file = output_dir.joinpath(scheduler_name)
113
+ save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
114
+ logger.info(f"Scheduler state saved in {output_scheduler_file}")
115
+ # DataLoader states
116
+ for i, dataloader in enumerate(dataloaders):
117
+ sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
118
+ output_sampler_file = output_dir.joinpath(sampler_name)
119
+ # Only save if we have our custom sampler
120
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
121
+
122
+ if isinstance(dataloader.dataset, IterableDatasetShard):
123
+ sampler = dataloader.get_sampler()
124
+ if isinstance(sampler, SeedableRandomSampler):
125
+ save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
126
+ logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
127
+
128
+ # GradScaler state
129
+ if scaler is not None:
130
+ state = scaler.state_dict()
131
+ output_scaler_file = output_dir.joinpath(SCALER_NAME)
132
+ torch.save(state, output_scaler_file)
133
+ logger.info(f"Gradient scaler state saved in {output_scaler_file}")
134
+ # Random number generator states
135
+ states = {}
136
+ states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
137
+ states["random_state"] = random.getstate()
138
+ states["numpy_random_seed"] = np.random.get_state()
139
+ states["torch_manual_seed"] = torch.get_rng_state()
140
+ if is_xpu_available():
141
+ states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
142
+ else:
143
+ states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
144
+ if is_torch_xla_available():
145
+ states["xm_seed"] = xm.get_rng_state()
146
+ output_states_file = output_dir.joinpath(states_name)
147
+ torch.save(states, output_states_file)
148
+ logger.info(f"Random states saved in {output_states_file}")
149
+ return output_dir
150
+
151
+
152
+ def load_accelerator_state(
153
+ input_dir,
154
+ models,
155
+ optimizers,
156
+ schedulers,
157
+ dataloaders,
158
+ process_index,
159
+ scaler=None,
160
+ map_location=None,
161
+ **load_model_func_kwargs,
162
+ ):
163
+ """
164
+ Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
165
+
166
+ Args:
167
+ input_dir (`str` or `os.PathLike`):
168
+ The name of the folder to load all relevant weights and states.
169
+ models (`List[torch.nn.Module]`):
170
+ A list of model instances
171
+ optimizers (`List[torch.optim.Optimizer]`):
172
+ A list of optimizer instances
173
+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
174
+ A list of learning rate schedulers
175
+ process_index (`int`):
176
+ The current process index in the Accelerator state
177
+ scaler (`torch.cuda.amp.GradScaler`, *optional*):
178
+ An optional *GradScaler* instance to load
179
+ map_location (`str`, *optional*):
180
+ What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
181
+ load_model_func_kwargs (`dict`, *optional*):
182
+ Additional arguments that can be passed to the model's `load_state_dict` method.
183
+ """
184
+ if map_location not in [None, "cpu", "on_device"]:
185
+ raise TypeError(
186
+ "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
187
+ )
188
+ if map_location is None:
189
+ map_location = "cpu"
190
+ elif map_location == "on_device":
191
+ map_location = PartialState().device
192
+
193
+ input_dir = Path(input_dir)
194
+ # Model states
195
+ for i, model in enumerate(models):
196
+ ending = f"_{i}" if i > 0 else ""
197
+ input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
198
+ if input_model_file.exists():
199
+ state_dict = load_file(input_model_file, device=str(map_location))
200
+ else:
201
+ # Load with torch
202
+ input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
203
+ state_dict = torch.load(input_model_file, map_location=map_location)
204
+ models[i].load_state_dict(state_dict, **load_model_func_kwargs)
205
+ logger.info("All model weights loaded successfully")
206
+
207
+ # Optimizer states
208
+ for i, opt in enumerate(optimizers):
209
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
210
+ input_optimizer_file = input_dir.joinpath(optimizer_name)
211
+ optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
212
+ optimizers[i].load_state_dict(optimizer_state)
213
+ logger.info("All optimizer states loaded successfully")
214
+
215
+ # Scheduler states
216
+ for i, scheduler in enumerate(schedulers):
217
+ scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
218
+ input_scheduler_file = input_dir.joinpath(scheduler_name)
219
+ scheduler.load_state_dict(torch.load(input_scheduler_file))
220
+ logger.info("All scheduler states loaded successfully")
221
+
222
+ for i, dataloader in enumerate(dataloaders):
223
+ sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
224
+ input_sampler_file = input_dir.joinpath(sampler_name)
225
+ # Only load if we have our custom sampler
226
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
227
+
228
+ if isinstance(dataloader.dataset, IterableDatasetShard):
229
+ sampler = dataloader.get_sampler()
230
+ if isinstance(sampler, SeedableRandomSampler):
231
+ sampler = dataloader.set_sampler(torch.load(input_sampler_file))
232
+ logger.info("All dataloader sampler states loaded successfully")
233
+
234
+ # GradScaler state
235
+ if scaler is not None:
236
+ input_scaler_file = input_dir.joinpath(SCALER_NAME)
237
+ scaler.load_state_dict(torch.load(input_scaler_file))
238
+ logger.info("GradScaler state loaded successfully")
239
+
240
+ # Random states
241
+ try:
242
+ states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
243
+ random.setstate(states["random_state"])
244
+ np.random.set_state(states["numpy_random_seed"])
245
+ torch.set_rng_state(states["torch_manual_seed"])
246
+ if is_xpu_available():
247
+ torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
248
+ else:
249
+ torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
250
+ if is_torch_xla_available():
251
+ xm.set_rng_state(states["xm_seed"])
252
+ logger.info("All random states loaded successfully")
253
+ except Exception:
254
+ logger.info("Could not load random states")
255
+
256
+
257
+ def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
258
+ """
259
+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
260
+ """
261
+ # Should this be the right way to get a qual_name type value from `obj`?
262
+ save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
263
+ logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
264
+ save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
265
+
266
+
267
+ def load_custom_state(obj, path, index: int = 0):
268
+ """
269
+ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
270
+ """
271
+ load_location = f"{path}/custom_checkpoint_{index}.pkl"
272
+ logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}")
273
+ obj.load_state_dict(torch.load(load_location, map_location="cpu"))
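
`save_accelerator_state` and `load_accelerator_state` above are normally reached through `Accelerator.save_state` / `Accelerator.load_state` (defined in `accelerator.py`, whose diff is not rendered above). A minimal sketch of that round trip, assuming a single-process CPU run and an illustrative `ckpt_dir` path:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters())
model, optimizer = accelerator.prepare(model, optimizer)

# save_state() routes through save_accelerator_state(): model weights (safetensors by
# default), optimizer/scheduler/sampler state, GradScaler state, and per-process RNG states.
accelerator.save_state("ckpt_dir")

# ...and load_state() restores everything in place via load_accelerator_state().
accelerator.load_state("ckpt_dir")
```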
llmeval-env/lib/python3.10/site-packages/accelerate/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
llmeval-env/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from accelerate.commands.config import get_config_parser
+ from accelerate.commands.env import env_command_parser
+ from accelerate.commands.estimate import estimate_command_parser
+ from accelerate.commands.launch import launch_command_parser
+ from accelerate.commands.test import test_command_parser
+ from accelerate.commands.tpu import tpu_command_parser
+ from accelerate.commands.utils import CustomArgumentParser
+
+
+ def main():
+     parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
+     subparsers = parser.add_subparsers(help="accelerate command helpers")
+
+     # Register commands
+     get_config_parser(subparsers=subparsers)
+     estimate_command_parser(subparsers=subparsers)
+     env_command_parser(subparsers=subparsers)
+     launch_command_parser(subparsers=subparsers)
+     tpu_command_parser(subparsers=subparsers)
+     test_command_parser(subparsers=subparsers)
+
+     # Let's go
+     args = parser.parse_args()
+
+     if not hasattr(args, "func"):
+         parser.print_help()
+         exit(1)
+
+     # Run
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
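
Each `*_command_parser` registered in `main()` follows the same argparse pattern: attach a subparser, add its arguments, and bind the handler with `set_defaults(func=...)` so that `args.func(args)` dispatches to it. A small self-contained sketch of that pattern (the `hello` command is hypothetical, not part of accelerate):

```python
import argparse


def hello_command_parser(subparsers=None):
    # Mirrors the shape of launch_command_parser & co.: create or attach a subparser...
    if subparsers is not None:
        parser = subparsers.add_parser("hello", description="Toy command")
    else:
        parser = argparse.ArgumentParser("hello", description="Toy command")
    parser.add_argument("--name", default="world")
    # ...and bind the handler that main() will invoke via args.func(args).
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser


parser = argparse.ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
hello_command_parser(subparsers=subparsers)
args = parser.parse_args(["hello", "--name", "accelerate"])
args.func(args)  # -> hello accelerate
```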
llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py ADDED
@@ -0,0 +1,267 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import json
17
+ import os
18
+
19
+ from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
20
+ from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
21
+ from ...utils.imports import is_boto3_available
22
+ from .config_args import SageMakerConfig
23
+ from .config_utils import (
24
+ DYNAMO_BACKENDS,
25
+ _ask_field,
26
+ _ask_options,
27
+ _convert_dynamo_backend,
28
+ _convert_mixed_precision,
29
+ _convert_sagemaker_distributed_mode,
30
+ _convert_yes_no_to_bool,
31
+ )
32
+
33
+
34
+ if is_boto3_available():
35
+ import boto3 # noqa: F401
36
+
37
+
38
+ def _create_iam_role_for_sagemaker(role_name):
39
+ iam_client = boto3.client("iam")
40
+
41
+ sagemaker_trust_policy = {
42
+ "Version": "2012-10-17",
43
+ "Statement": [
44
+ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
45
+ ],
46
+ }
47
+ try:
48
+ # create the role, associated with the chosen trust policy
49
+ iam_client.create_role(
50
+ RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
51
+ )
52
+ policy_document = {
53
+ "Version": "2012-10-17",
54
+ "Statement": [
55
+ {
56
+ "Effect": "Allow",
57
+ "Action": [
58
+ "sagemaker:*",
59
+ "ecr:GetDownloadUrlForLayer",
60
+ "ecr:BatchGetImage",
61
+ "ecr:BatchCheckLayerAvailability",
62
+ "ecr:GetAuthorizationToken",
63
+ "cloudwatch:PutMetricData",
64
+ "cloudwatch:GetMetricData",
65
+ "cloudwatch:GetMetricStatistics",
66
+ "cloudwatch:ListMetrics",
67
+ "logs:CreateLogGroup",
68
+ "logs:CreateLogStream",
69
+ "logs:DescribeLogStreams",
70
+ "logs:PutLogEvents",
71
+ "logs:GetLogEvents",
72
+ "s3:CreateBucket",
73
+ "s3:ListBucket",
74
+ "s3:GetBucketLocation",
75
+ "s3:GetObject",
76
+ "s3:PutObject",
77
+ ],
78
+ "Resource": "*",
79
+ }
80
+ ],
81
+ }
82
+ # attach policy to role
83
+ iam_client.put_role_policy(
84
+ RoleName=role_name,
85
+ PolicyName=f"{role_name}_policy_permission",
86
+ PolicyDocument=json.dumps(policy_document, indent=2),
87
+ )
88
+ except iam_client.exceptions.EntityAlreadyExistsException:
89
+ print(f"role {role_name} already exists. Using existing one")
90
+
91
+
92
+ def _get_iam_role_arn(role_name):
93
+ iam_client = boto3.client("iam")
94
+ return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
95
+
96
+
97
+ def get_sagemaker_input():
98
+ credentials_configuration = _ask_options(
99
+ "How do you want to authorize?",
100
+ ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
101
+ int,
102
+ )
103
+ aws_profile = None
104
+ if credentials_configuration == 0:
105
+ aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
106
+ os.environ["AWS_PROFILE"] = aws_profile
107
+ else:
108
+ print(
109
+ "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
110
+ "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
111
+ )
112
+ aws_access_key_id = _ask_field("AWS Access Key ID: ")
113
+ os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
114
+
115
+ aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
116
+ os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
117
+
118
+ aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
119
+ os.environ["AWS_DEFAULT_REGION"] = aws_region
120
+
121
+ role_management = _ask_options(
122
+ "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
123
+ ["Provide IAM Role name", "Create new IAM role using credentials"],
124
+ int,
125
+ )
126
+ if role_management == 0:
127
+ iam_role_name = _ask_field("Enter your IAM role name: ")
128
+ else:
129
+ iam_role_name = "accelerate_sagemaker_execution_role"
130
+ print(f'Accelerate will create an IAM role "{iam_role_name}" using the provided credentials')
131
+ _create_iam_role_for_sagemaker(iam_role_name)
132
+
133
+ is_custom_docker_image = _ask_field(
134
+ "Do you want to use custom Docker image? [yes/NO]: ",
135
+ _convert_yes_no_to_bool,
136
+ default=False,
137
+ error_message="Please enter yes or no.",
138
+ )
139
+ docker_image = None
140
+ if is_custom_docker_image:
141
+ docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
142
+
143
+ is_sagemaker_inputs_enabled = _ask_field(
144
+ "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
145
+ _convert_yes_no_to_bool,
146
+ default=False,
147
+ error_message="Please enter yes or no.",
148
+ )
149
+ sagemaker_inputs_file = None
150
+ if is_sagemaker_inputs_enabled:
151
+ sagemaker_inputs_file = _ask_field(
152
+ "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
153
+ lambda x: str(x).lower(),
154
+ )
155
+
156
+ is_sagemaker_metrics_enabled = _ask_field(
157
+ "Do you want to enable SageMaker metrics? [yes/NO]: ",
158
+ _convert_yes_no_to_bool,
159
+ default=False,
160
+ error_message="Please enter yes or no.",
161
+ )
162
+ sagemaker_metrics_file = None
163
+ if is_sagemaker_metrics_enabled:
164
+ sagemaker_metrics_file = _ask_field(
165
+ "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
166
+ lambda x: str(x).lower(),
167
+ )
168
+
169
+ distributed_type = _ask_options(
170
+ "What is the distributed mode?",
171
+ ["No distributed training", "Data parallelism"],
172
+ _convert_sagemaker_distributed_mode,
173
+ )
174
+ dynamo_config = {}
175
+ use_dynamo = _ask_field(
176
+ "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
177
+ _convert_yes_no_to_bool,
178
+ default=False,
179
+ error_message="Please enter yes or no.",
180
+ )
181
+ if use_dynamo:
182
+ prefix = "dynamo_"
183
+ dynamo_config[prefix + "backend"] = _ask_options(
184
+ "Which dynamo backend would you like to use?",
185
+ [x.lower() for x in DYNAMO_BACKENDS],
186
+ _convert_dynamo_backend,
187
+ default=2,
188
+ )
189
+ use_custom_options = _ask_field(
190
+ "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
191
+ _convert_yes_no_to_bool,
192
+ default=False,
193
+ error_message="Please enter yes or no.",
194
+ )
195
+
196
+ if use_custom_options:
197
+ dynamo_config[prefix + "mode"] = _ask_options(
198
+ "Which mode do you want to use?",
199
+ TORCH_DYNAMO_MODES,
200
+ lambda x: TORCH_DYNAMO_MODES[int(x)],
201
+ default="default",
202
+ )
203
+ dynamo_config[prefix + "use_fullgraph"] = _ask_field(
204
+ "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
205
+ _convert_yes_no_to_bool,
206
+ default=False,
207
+ error_message="Please enter yes or no.",
208
+ )
209
+ dynamo_config[prefix + "use_dynamic"] = _ask_field(
210
+ "Do you want to enable dynamic shape tracing? [yes/NO]: ",
211
+ _convert_yes_no_to_bool,
212
+ default=False,
213
+ error_message="Please enter yes or no.",
214
+ )
215
+ ec2_instance_query = "Which EC2 instance type you want to use for your training?"
216
+ if distributed_type != SageMakerDistributedType.NO:
217
+ ec2_instance_type = _ask_options(
218
+ ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
219
+ )
220
+ else:
221
+ ec2_instance_query += "? [ml.p3.2xlarge]:"
222
+ ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
223
+
224
+ debug = False
225
+ if distributed_type != SageMakerDistributedType.NO:
226
+ debug = _ask_field(
227
+ "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
228
+ _convert_yes_no_to_bool,
229
+ default=False,
230
+ error_message="Please enter yes or no.",
231
+ )
232
+
233
+ num_machines = 1
234
+ if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
235
+ num_machines = _ask_field(
236
+ "How many machines do you want use? [1]: ",
237
+ int,
238
+ default=1,
239
+ )
240
+
241
+ mixed_precision = _ask_options(
242
+ "Do you wish to use FP16 or BF16 (mixed precision)?",
243
+ ["no", "fp16", "bf16", "fp8"],
244
+ _convert_mixed_precision,
245
+ )
246
+
247
+ if use_dynamo and mixed_precision == "no":
248
+ print(
249
+ "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
250
+ )
251
+
252
+ return SageMakerConfig(
253
+ image_uri=docker_image,
254
+ compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
255
+ distributed_type=distributed_type,
256
+ use_cpu=False,
257
+ dynamo_config=dynamo_config,
258
+ ec2_instance_type=ec2_instance_type,
259
+ profile=aws_profile,
260
+ region=aws_region,
261
+ iam_role_name=iam_role_name,
262
+ mixed_precision=mixed_precision,
263
+ num_machines=num_machines,
264
+ sagemaker_inputs_file=sagemaker_inputs_file,
265
+ sagemaker_metrics_file=sagemaker_metrics_file,
266
+ debug=debug,
267
+ )
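For reference, the `SageMakerConfig` returned above can also be built directly instead of through the interactive prompts; a minimal non-interactive sketch using the same constructor fields (the concrete values below are only illustrative assumptions, not recommended defaults):

# Minimal sketch: build the same config object without the questionnaire.
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils.dataclasses import ComputeEnvironment, SageMakerDistributedType

config = SageMakerConfig(
    image_uri=None,  # fall back to the default training image
    compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
    distributed_type=SageMakerDistributedType.DATA_PARALLEL,
    use_cpu=False,
    dynamo_config={},
    ec2_instance_type="ml.p3.16xlarge",  # placeholder instance type
    profile="default",
    region="us-east-1",
    iam_role_name="accelerate_sagemaker_execution_role",
    mixed_precision="fp16",
    num_machines=2,
    sagemaker_inputs_file=None,
    sagemaker_metrics_file=None,
    debug=False,
)
print(config)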
llmeval-env/lib/python3.10/site-packages/accelerate/commands/env.py ADDED
@@ -0,0 +1,109 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+ import platform
20
+ import subprocess
21
+
22
+ import numpy as np
23
+ import psutil
24
+ import torch
25
+
26
+ from accelerate import __version__ as version
27
+ from accelerate.commands.config import default_config_file, load_config_from_file
28
+
29
+ from ..utils import is_mlu_available, is_npu_available, is_xpu_available
30
+
31
+
32
+ def env_command_parser(subparsers=None):
33
+ if subparsers is not None:
34
+ parser = subparsers.add_parser("env")
35
+ else:
36
+ parser = argparse.ArgumentParser("Accelerate env command")
37
+
38
+ parser.add_argument(
39
+ "--config_file", default=None, help="The config file to use for the default values in the launching script."
40
+ )
41
+
42
+ if subparsers is not None:
43
+ parser.set_defaults(func=env_command)
44
+ return parser
45
+
46
+
47
+ def env_command(args):
48
+ pt_version = torch.__version__
49
+ pt_cuda_available = torch.cuda.is_available()
50
+ pt_xpu_available = is_xpu_available()
51
+ pt_mlu_available = is_mlu_available()
52
+ pt_npu_available = is_npu_available()
53
+
54
+ accelerate_config = "Not found"
55
+ # Get the default from the config file.
56
+ if args.config_file is not None or os.path.isfile(default_config_file):
57
+ accelerate_config = load_config_from_file(args.config_file).to_dict()
58
+
59
+ # if we can run which, get it
60
+ command = None
61
+ bash_location = "Not found"
62
+ if os.name == "nt":
63
+ command = ["where", "accelerate"]
64
+ elif os.name == "posix":
65
+ command = ["which", "accelerate"]
66
+ if command is not None:
67
+ bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
68
+ info = {
69
+ "`Accelerate` version": version,
70
+ "Platform": platform.platform(),
71
+ "`accelerate` bash location": bash_location,
72
+ "Python version": platform.python_version(),
73
+ "Numpy version": np.__version__,
74
+ "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
75
+ "PyTorch XPU available": str(pt_xpu_available),
76
+ "PyTorch NPU available": str(pt_npu_available),
77
+ "PyTorch MLU available": str(pt_mlu_available),
78
+ "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
79
+ }
80
+ if pt_cuda_available:
81
+ info["GPU type"] = torch.cuda.get_device_name()
82
+ if pt_npu_available:
83
+ info["CANN version"] = torch.version.cann
84
+
85
+ print("\nCopy-and-paste the text below in your GitHub issue\n")
86
+ print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
87
+
88
+ print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
89
+ accelerate_config_str = (
90
+ "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
91
+ if isinstance(accelerate_config, dict)
92
+ else f"\t{accelerate_config}"
93
+ )
94
+ print(accelerate_config_str)
95
+
96
+ info["`Accelerate` configs"] = accelerate_config
97
+
98
+ return info
99
+
100
+
101
+ def main() -> int:
102
+ parser = env_command_parser()
103
+ args = parser.parse_args()
104
+ env_command(args)
105
+ return 0
106
+
107
+
108
+ if __name__ == "__main__":
109
+ raise SystemExit(main())
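Since `env_command()` both prints the report and returns the `info` dictionary, the same diagnostics can be collected programmatically; a minimal sketch (building the `Namespace` by hand is an assumption that mirrors `accelerate env` run with no flags):

# Minimal sketch: gather the environment report without the CLI entry point.
from argparse import Namespace

from accelerate.commands.env import env_command

info = env_command(Namespace(config_file=None))  # prints the report shown above and returns the dict
print(info["`Accelerate` version"], info["PyTorch version (GPU?)"])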
llmeval-env/lib/python3.10/site-packages/accelerate/commands/estimate.py ADDED
@@ -0,0 +1,309 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from huggingface_hub import model_info
17
+ from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
18
+
19
+ from accelerate import init_empty_weights
20
+ from accelerate.commands.utils import CustomArgumentParser
21
+ from accelerate.utils import (
22
+ calculate_maximum_sizes,
23
+ convert_bytes,
24
+ is_timm_available,
25
+ is_transformers_available,
26
+ )
27
+
28
+
29
+ if is_transformers_available():
30
+ import transformers
31
+ from transformers import AutoConfig, AutoModel
32
+
33
+ if is_timm_available():
34
+ import timm
35
+
36
+
37
+ def verify_on_hub(repo: str, token: str = None):
38
+ "Verifies that the model is on the hub and returns the model info."
39
+ try:
40
+ return model_info(repo, token=token)
41
+ except GatedRepoError:
42
+ return "gated"
43
+ except RepositoryNotFoundError:
44
+ return "repo"
45
+
46
+
47
+ def check_has_model(error):
48
+ """
49
+ Checks what library spawned `error` when a model is not found
50
+ """
51
+ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
52
+ return "timm"
53
+ elif (
54
+ is_transformers_available()
55
+ and isinstance(error, OSError)
56
+ and "does not appear to have a file named" in error.args[0]
57
+ ):
58
+ return "transformers"
59
+ else:
60
+ return "unknown"
61
+
62
+
63
+ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
64
+ """
65
+ Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
66
+
67
+ Args:
68
+ model_name (`str`):
69
+ The model name on the Hub
70
+ library_name (`str`):
71
+ The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
72
+ metadata on the Hub to determine the library.
73
+ trust_remote_code (`bool`, `optional`, defaults to `False`):
74
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
75
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
76
+ execute code present on the Hub on your local machine.
77
+ access_token (`str`, `optional`, defaults to `None`):
78
+ The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
79
+
80
+ Returns:
81
+ `torch.nn.Module`: The torch model that has been initialized on the `meta` device.
82
+
83
+ """
84
+ model_info = verify_on_hub(model_name, access_token)
85
+ # Simplified errors
86
+ if model_info == "gated":
87
+ raise GatedRepoError(
88
+ f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
89
+ )
90
+ elif model_info == "repo":
91
+ raise RepositoryNotFoundError(
92
+ f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
93
+ " make sure you are authenticated via `huggingface-cli login` and have access."
94
+ )
95
+ if library_name is None:
96
+ library_name = getattr(model_info, "library_name", False)
97
+ if not library_name:
98
+ raise ValueError(
99
+ f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
100
+ )
101
+ if library_name == "transformers":
102
+ if not is_transformers_available():
103
+ raise ImportError(
104
+ f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
105
+ )
106
+ print(f"Loading pretrained config for `{model_name}` from `transformers`...")
107
+ if model_info.config is None:
108
+ raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")
109
+
110
+ auto_map = model_info.config.get("auto_map", False)
111
+ config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
112
+ with init_empty_weights():
113
+ # remote code could specify a specific `AutoModel` class in the `auto_map`
114
+ constructor = AutoModel
115
+ if isinstance(auto_map, dict):
116
+ value = None
117
+ for key in auto_map.keys():
118
+ if key.startswith("AutoModelFor"):
119
+ value = key
120
+ break
121
+ if value is not None:
122
+ constructor = getattr(transformers, value)
123
+ model = constructor.from_config(config, trust_remote_code=trust_remote_code)
124
+ elif library_name == "timm":
125
+ if not is_timm_available():
126
+ raise ImportError(
127
+ f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
128
+ )
129
+ print(f"Loading pretrained config for `{model_name}` from `timm`...")
130
+ with init_empty_weights():
131
+ model = timm.create_model(model_name, pretrained=False)
132
+ else:
133
+ raise ValueError(
134
+ f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
135
+ )
136
+ return model
137
+
138
+
139
+ def create_ascii_table(headers: list, rows: list, title: str):
140
+ "Creates a pretty table from a list of rows, minimal version of `tabulate`."
141
+ sep_char, in_between = "│", "─"
142
+ column_widths = []
143
+ for i in range(len(headers)):
144
+ column_values = [row[i] for row in rows] + [headers[i]]
145
+ max_column_width = max(len(value) for value in column_values)
146
+ column_widths.append(max_column_width)
147
+
148
+ formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
149
+
150
+ pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
151
+ diff = 0
152
+
153
+ def make_row(left_char, middle_char, right_char):
154
+ return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
155
+
156
+ separator = make_row("├", "┼", "┤")
157
+ if len(title) > sum(column_widths):
158
+ diff = abs(len(title) - len(separator))
159
+ column_widths[-1] += diff
160
+
161
+ # Update with diff
162
+ separator = make_row("├", "┼", "┤")
163
+ initial_rows = [
164
+ make_row("┌", in_between, "┐"),
165
+ f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
166
+ make_row("├", "┬", "┤"),
167
+ ]
168
+ table = "\n".join(initial_rows) + "\n"
169
+ column_widths[-1] += diff
170
+ centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
171
+ table += f"{pattern % tuple(centered_line)}\n{separator}\n"
172
+ for i, line in enumerate(rows):
173
+ centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
174
+ table += f"{pattern % tuple(centered_line)}\n"
175
+ table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
176
+
177
+ return table
178
+
179
+
180
+ def estimate_command_parser(subparsers=None):
181
+ if subparsers is not None:
182
+ parser = subparsers.add_parser("estimate-memory")
183
+ else:
184
+ parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
185
+
186
+ parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
187
+ parser.add_argument(
188
+ "--library_name",
189
+ type=str,
190
+ help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
191
+ choices=["timm", "transformers"],
192
+ )
193
+ parser.add_argument(
194
+ "--dtypes",
195
+ type=str,
196
+ nargs="+",
197
+ default=["float32", "float16", "int8", "int4"],
198
+ help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
199
+ choices=["float32", "float16", "int8", "int4"],
200
+ )
201
+ parser.add_argument(
202
+ "--trust_remote_code",
203
+ action="store_true",
204
+ help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
205
+ should only be used for repositories you trust and in which you have read the code, as it will execute
206
+ code present on the Hub on your local machine.""",
207
+ default=False,
208
+ )
209
+
210
+ if subparsers is not None:
211
+ parser.set_defaults(func=estimate_command)
212
+ return parser
213
+
214
+
215
+ def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
216
+ """
217
+ Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
218
+ 1.
219
+
220
+ Args:
221
+ bytes (`int`):
222
+ The size of the model being trained.
223
+ mixed_precision (`str`):
224
+ The mixed precision that would be run.
225
+ msamp_config (`str`):
226
+ The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
227
+ """
228
+ memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
229
+ fp32_size = bytes
230
+ fp16_size = bytes // 2
231
+
232
+ if mixed_precision == "float32":
233
+ memory_sizes["model"] = fp32_size
234
+ memory_sizes["gradients"] = fp32_size
235
+ memory_sizes["optimizer"] = fp32_size * 2
236
+ memory_sizes["step"] = fp32_size * 4
237
+ elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
238
+ # With native `TransformersEngine`, there are no memory savings with FP8
239
+ # With mixed precision training, the model has weights stored
240
+ # in FP16 and FP32
241
+ memory_sizes["model"] = fp32_size
242
+ # 1.5 from weight gradient + computation (GEMM)
243
+ memory_sizes["gradients"] = fp32_size + fp16_size
244
+ # 2x from optimizer states
245
+ memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states
246
+ memory_sizes["step"] = memory_sizes["optimizer"]
247
+ return memory_sizes
248
+
249
+
250
+ def gather_data(args):
251
+ "Creates an empty model and gathers the data for the sizes"
252
+ try:
253
+ model = create_empty_model(
254
+ args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
255
+ )
256
+ except (RuntimeError, OSError) as e:
257
+ library = check_has_model(e)
258
+ if library != "unknown":
259
+ raise RuntimeError(
260
+ f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
261
+ )
262
+ raise e
263
+
264
+ total_size, largest_layer = calculate_maximum_sizes(model)
265
+
266
+ data = []
267
+
268
+ for dtype in args.dtypes:
269
+ dtype_total_size = total_size
270
+ dtype_largest_layer = largest_layer[0]
271
+ dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
272
+ if dtype == "float16":
273
+ dtype_total_size /= 2
274
+ dtype_largest_layer /= 2
275
+ elif dtype == "int8":
276
+ dtype_total_size /= 4
277
+ dtype_largest_layer /= 4
278
+ elif dtype == "int4":
279
+ dtype_total_size /= 8
280
+ dtype_largest_layer /= 8
281
+ data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
282
+ return data
283
+
284
+
285
+ def estimate_command(args):
286
+ data = gather_data(args)
287
+ for row in data:
288
+ for i, item in enumerate(row):
289
+ if isinstance(item, (int, float)):
290
+ row[i] = convert_bytes(item)
291
+ elif isinstance(item, dict):
292
+ training_usage = max(item.values())
293
+ row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"
294
+
295
+ headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
296
+
297
+ title = f"Memory Usage for loading `{args.model_name}`"
298
+ table = create_ascii_table(headers, data, title)
299
+ print(table)
300
+
301
+
302
+ def main():
303
+ parser = estimate_command_parser()
304
+ args = parser.parse_args()
305
+ estimate_command(args)
306
+
307
+
308
+ if __name__ == "__main__":
309
+ main()
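The `main()` above shows the standalone path; the same estimate can be produced from Python by reusing the parser, which is what the `accelerate estimate-memory` sub-command ends up doing. A minimal sketch (the model id below is just an example):

# Minimal sketch: run the memory estimator programmatically.
# Assumption: "bert-base-cased" is an arbitrary public Hub model id used for illustration.
from accelerate.commands.estimate import estimate_command, estimate_command_parser

parser = estimate_command_parser()
args = parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
estimate_command(args)  # prints the ASCII table built by create_ascii_table()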
llmeval-env/lib/python3.10/site-packages/accelerate/commands/launch.py ADDED
@@ -0,0 +1,1092 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import importlib
19
+ import logging
20
+ import os
21
+ import subprocess
22
+ import sys
23
+ from pathlib import Path
24
+
25
+ import psutil
26
+ import torch
27
+
28
+ from accelerate.commands.config import default_config_file, load_config_from_file
29
+ from accelerate.commands.config.config_args import SageMakerConfig
30
+ from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
31
+ from accelerate.commands.utils import CustomArgumentParser
32
+ from accelerate.state import get_int_from_env
33
+ from accelerate.utils import (
34
+ ComputeEnvironment,
35
+ DistributedType,
36
+ PrepareForLaunch,
37
+ _filter_args,
38
+ check_cuda_p2p_ib_support,
39
+ convert_dict_to_env_variables,
40
+ is_bf16_available,
41
+ is_deepspeed_available,
42
+ is_mlu_available,
43
+ is_npu_available,
44
+ is_rich_available,
45
+ is_sagemaker_available,
46
+ is_torch_version,
47
+ is_torch_xla_available,
48
+ is_xpu_available,
49
+ patch_environment,
50
+ prepare_deepspeed_cmd_env,
51
+ prepare_multi_gpu_env,
52
+ prepare_sagemager_args_inputs,
53
+ prepare_simple_launcher_cmd_env,
54
+ prepare_tpu,
55
+ )
56
+ from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
57
+
58
+
59
+ if is_rich_available():
60
+ from rich import get_console
61
+ from rich.logging import RichHandler
62
+
63
+ FORMAT = "%(message)s"
64
+ logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
65
+
66
+
67
+ logger = logging.getLogger(__name__)
68
+
69
+
70
+ options_to_group = {
71
+ "multi_gpu": "Distributed GPUs",
72
+ "tpu": "TPU",
73
+ "use_deepspeed": "DeepSpeed Arguments",
74
+ "use_fsdp": "FSDP Arguments",
75
+ "use_megatron_lm": "Megatron-LM Arguments",
76
+ }
77
+
78
+
79
+ def clean_option(option):
80
+ "Finds all cases of - after the first two characters and changes them to _"
81
+ if option.startswith("--"):
82
+ return option[2:].replace("-", "_")
83
+
84
+
85
+ class CustomHelpFormatter(argparse.HelpFormatter):
86
+ """
87
+ This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
88
+ called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
89
+ for that platform.
90
+ """
91
+
92
+ def __init__(self, *args, **kwargs):
93
+ super().__init__(*args, **kwargs)
94
+ self.titles = [
95
+ "Hardware Selection Arguments",
96
+ "Resource Selection Arguments",
97
+ "Training Paradigm Arguments",
98
+ "positional arguments",
99
+ "optional arguments",
100
+ ]
101
+
102
+ def add_argument(self, action: argparse.Action):
103
+ if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
104
+ args = sys.argv[2:]
105
+ else:
106
+ args = sys.argv[1:]
107
+
108
+ if len(args) > 1:
109
+ args = list(map(clean_option, args))
110
+ used_platforms = [arg for arg in args if arg in options_to_group.keys()]
111
+ used_titles = [options_to_group[o] for o in used_platforms]
112
+ if action.container.title not in self.titles + used_titles:
113
+ action.help = argparse.SUPPRESS
114
+ elif action.container.title == "Hardware Selection Arguments":
115
+ if set(action.option_strings).isdisjoint(set(args)):
116
+ action.help = argparse.SUPPRESS
117
+ else:
118
+ action.help = action.help + " (currently selected)"
119
+ elif action.container.title == "Training Paradigm Arguments":
120
+ if set(action.option_strings).isdisjoint(set(args)):
121
+ action.help = argparse.SUPPRESS
122
+ else:
123
+ action.help = action.help + " (currently selected)"
124
+
125
+ action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
126
+ super().add_argument(action)
127
+
128
+ def end_section(self):
129
+ if len(self._current_section.items) < 2:
130
+ self._current_section.items = []
131
+ self._current_section.heading = ""
132
+ super().end_section()
133
+
134
+
135
+ def launch_command_parser(subparsers=None):
136
+ description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
137
+ if subparsers is not None:
138
+ parser = subparsers.add_parser(
139
+ "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
140
+ )
141
+ else:
142
+ parser = CustomArgumentParser(
143
+ "Accelerate launch command",
144
+ description=description,
145
+ add_help=False,
146
+ allow_abbrev=False,
147
+ formatter_class=CustomHelpFormatter,
148
+ )
149
+
150
+ parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
151
+
152
+ parser.add_argument(
153
+ "--config_file",
154
+ default=None,
155
+ help="The config file to use for the default values in the launching script.",
156
+ )
157
+ parser.add_argument(
158
+ "--quiet",
159
+ "-q",
160
+ action="store_true",
161
+ help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
162
+ )
163
+ # Hardware selection arguments
164
+ hardware_args = parser.add_argument_group(
165
+ "Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
166
+ )
167
+ hardware_args.add_argument(
168
+ "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
169
+ )
170
+ hardware_args.add_argument(
171
+ "--multi_gpu",
172
+ default=False,
173
+ action="store_true",
174
+ help="Whether or not this should launch a distributed GPU training.",
175
+ )
176
+ hardware_args.add_argument(
177
+ "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
178
+ )
179
+ hardware_args.add_argument(
180
+ "--ipex",
181
+ default=False,
182
+ action="store_true",
183
+ help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.",
184
+ )
185
+
186
+ # Resource selection arguments
187
+ resource_args = parser.add_argument_group(
188
+ "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
189
+ )
190
+ resource_args.add_argument(
191
+ "--mixed_precision",
192
+ type=str,
193
+ choices=["no", "fp16", "bf16", "fp8"],
194
+ help="Whether or not to use mixed precision training. "
195
+ "Choose between FP16 and BF16 (bfloat16) training. "
196
+ "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
197
+ )
198
+ resource_args.add_argument(
199
+ "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
200
+ )
201
+ resource_args.add_argument(
202
+ "--num_machines", type=int, default=None, help="The total number of machines used in this training."
203
+ )
204
+ resource_args.add_argument(
205
+ "--num_cpu_threads_per_process",
206
+ type=int,
207
+ default=None,
208
+ help="The number of CPU threads per process. Can be tuned for optimal performance.",
209
+ )
210
+ resource_args.add_argument(
211
+ "--enable_cpu_affinity",
212
+ default=False,
213
+ action="store_true",
214
+ help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
215
+ )
216
+
217
+ # Dynamo arguments
218
+ resource_args.add_argument(
219
+ "--dynamo_backend",
220
+ type=str,
221
+ choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
222
+ help="Choose a backend to optimize your training with dynamo, see more at "
223
+ "https://github.com/pytorch/torchdynamo.",
224
+ )
225
+ resource_args.add_argument(
226
+ "--dynamo_mode",
227
+ type=str,
228
+ default="default",
229
+ choices=TORCH_DYNAMO_MODES,
230
+ help="Choose a mode to optimize your training with dynamo.",
231
+ )
232
+ resource_args.add_argument(
233
+ "--dynamo_use_fullgraph",
234
+ default=False,
235
+ action="store_true",
236
+ help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
237
+ )
238
+ resource_args.add_argument(
239
+ "--dynamo_use_dynamic",
240
+ default=False,
241
+ action="store_true",
242
+ help="Whether to enable dynamic shape tracing.",
243
+ )
244
+
245
+ # Training Paradigm arguments
246
+ paradigm_args = parser.add_argument_group(
247
+ "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
248
+ )
249
+ paradigm_args.add_argument(
250
+ "--use_deepspeed",
251
+ default=False,
252
+ action="store_true",
253
+ help="Whether to use deepspeed.",
254
+ )
255
+ paradigm_args.add_argument(
256
+ "--use_fsdp",
257
+ default=False,
258
+ action="store_true",
259
+ help="Whether to use fsdp.",
260
+ )
261
+ paradigm_args.add_argument(
262
+ "--use_megatron_lm",
263
+ default=False,
264
+ action="store_true",
265
+ help="Whether to use Megatron-LM.",
266
+ )
267
+ paradigm_args.add_argument(
268
+ "--use_xpu",
269
+ default=False,
270
+ action="store_true",
271
+ help="Whether to use IPEX plugin to speed up training on XPU specifically.",
272
+ )
273
+
274
+ # distributed GPU training arguments
275
+ distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
276
+ distributed_args.add_argument(
277
+ "--gpu_ids",
278
+ default=None,
279
+ help="What GPUs (by id) should be used for training on this machine as a comma-seperated list",
280
+ )
281
+ distributed_args.add_argument(
282
+ "--same_network",
283
+ default=False,
284
+ action="store_true",
285
+ help="Whether all machines used for multinode training exist on the same local network.",
286
+ )
287
+ distributed_args.add_argument(
288
+ "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
289
+ )
290
+ distributed_args.add_argument(
291
+ "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
292
+ )
293
+ distributed_args.add_argument(
294
+ "--main_process_port",
295
+ type=int,
296
+ default=None,
297
+ help="The port to use to communicate with the machine of rank 0.",
298
+ )
299
+ distributed_args.add_argument(
300
+ "-t",
301
+ "--tee",
302
+ default="0",
303
+ type=str,
304
+ help="Tee std streams into a log file and also to console.",
305
+ )
306
+ distributed_args.add_argument(
307
+ "--role",
308
+ type=str,
309
+ default="default",
310
+ help="User-defined role for the workers.",
311
+ )
312
+ # Rendezvous related arguments
313
+ distributed_args.add_argument(
314
+ "--rdzv_backend",
315
+ type=str,
316
+ default="static",
317
+ help="The rendezvous method to use, such as 'static' (the default) or 'c10d'",
318
+ )
319
+ distributed_args.add_argument(
320
+ "--rdzv_conf",
321
+ type=str,
322
+ default="",
323
+ help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).",
324
+ )
325
+ distributed_args.add_argument(
326
+ "--max_restarts",
327
+ type=int,
328
+ default=0,
329
+ help="Maximum number of worker group restarts before failing.",
330
+ )
331
+ distributed_args.add_argument(
332
+ "--monitor_interval",
333
+ type=float,
334
+ default=5,
335
+ help="Interval, in seconds, to monitor the state of workers.",
336
+ )
337
+ parser.add_argument(
338
+ "-m",
339
+ "--module",
340
+ action="store_true",
341
+ help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
342
+ )
343
+ parser.add_argument(
344
+ "--no_python",
345
+ action="store_true",
346
+ help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
347
+ )
348
+
349
+ # TPU arguments
350
+ tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
351
+ tpu_args.add_argument(
352
+ "--tpu_cluster",
353
+ action="store_true",
354
+ dest="tpu_use_cluster",
355
+ help="Whether to use a GCP TPU pod for training.",
356
+ )
357
+ tpu_args.add_argument(
358
+ "--no_tpu_cluster",
359
+ action="store_false",
360
+ dest="tpu_use_cluster",
361
+ help="Should not be passed explicitly, this is for internal use only.",
362
+ )
363
+ tpu_args.add_argument(
364
+ "--tpu_use_sudo",
365
+ action="store_true",
366
+ help="Whether to use `sudo` when running the TPU training script in each pod.",
367
+ )
368
+ tpu_args.add_argument(
369
+ "--vm",
370
+ type=str,
371
+ action="append",
372
+ help=(
373
+ "List of single Compute VM instance names. "
374
+ "If not provided we assume usage of instance groups. For TPU pods."
375
+ ),
376
+ )
377
+ tpu_args.add_argument(
378
+ "--env",
379
+ type=str,
380
+ action="append",
381
+ help="List of environment variables to set on the Compute VM instances. For TPU pods.",
382
+ )
383
+ tpu_args.add_argument(
384
+ "--main_training_function",
385
+ type=str,
386
+ default=None,
387
+ help="The name of the main function to be executed in your script (only for TPU training).",
388
+ )
389
+ tpu_args.add_argument(
390
+ "--downcast_bf16",
391
+ action="store_true",
392
+ help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.",
393
+ )
394
+
395
+ # DeepSpeed arguments
396
+ deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
397
+ deepspeed_args.add_argument(
398
+ "--deepspeed_config_file",
399
+ default=None,
400
+ type=str,
401
+ help="DeepSpeed config file.",
402
+ )
403
+ deepspeed_args.add_argument(
404
+ "--zero_stage",
405
+ default=None,
406
+ type=int,
407
+ help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
408
+ "If unspecified, will default to `2`.",
409
+ )
410
+ deepspeed_args.add_argument(
411
+ "--offload_optimizer_device",
412
+ default=None,
413
+ type=str,
414
+ help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
415
+ "If unspecified, will default to 'none'.",
416
+ )
417
+ deepspeed_args.add_argument(
418
+ "--offload_param_device",
419
+ default=None,
420
+ type=str,
421
+ help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
422
+ "If unspecified, will default to 'none'.",
423
+ )
424
+ deepspeed_args.add_argument(
425
+ "--offload_optimizer_nvme_path",
426
+ default=None,
427
+ type=str,
428
+ help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
429
+ "If unspecified, will default to 'none'.",
430
+ )
431
+ deepspeed_args.add_argument(
432
+ "--offload_param_nvme_path",
433
+ default=None,
434
+ type=str,
435
+ help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). "
436
+ "If unspecified, will default to 'none'.",
437
+ )
438
+ deepspeed_args.add_argument(
439
+ "--gradient_accumulation_steps",
440
+ default=None,
441
+ type=int,
442
+ help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
443
+ "If unspecified, will default to `1`.",
444
+ )
445
+ deepspeed_args.add_argument(
446
+ "--gradient_clipping",
447
+ default=None,
448
+ type=float,
449
+ help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
450
+ "If unspecified, will default to `1.0`.",
451
+ )
452
+ deepspeed_args.add_argument(
453
+ "--zero3_init_flag",
454
+ default=None,
455
+ type=str,
456
+ help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
457
+ "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
458
+ )
459
+ deepspeed_args.add_argument(
460
+ "--zero3_save_16bit_model",
461
+ default=None,
462
+ type=str,
463
+ help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
464
+ "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
465
+ )
466
+ deepspeed_args.add_argument(
467
+ "--deepspeed_hostfile",
468
+ default=None,
469
+ type=str,
470
+ help="DeepSpeed hostfile for configuring multi-node compute resources.",
471
+ )
472
+ deepspeed_args.add_argument(
473
+ "--deepspeed_exclusion_filter",
474
+ default=None,
475
+ type=str,
476
+ help="DeepSpeed exclusion filter string when using mutli-node setup.",
477
+ )
478
+ deepspeed_args.add_argument(
479
+ "--deepspeed_inclusion_filter",
480
+ default=None,
481
+ type=str,
482
+ help="DeepSpeed inclusion filter string when using mutli-node setup.",
483
+ )
484
+ deepspeed_args.add_argument(
485
+ "--deepspeed_multinode_launcher",
486
+ default=None,
487
+ type=str,
488
+ help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
489
+ )
490
+ deepspeed_args.add_argument(
491
+ "--deepspeed_moe_layer_cls_names",
492
+ default=None,
493
+ type=str,
494
+ help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..."
495
+ " (useful only when `use_deepspeed` flag is passed).",
496
+ )
497
+
498
+ # fsdp arguments
499
+ fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
500
+ fsdp_args.add_argument(
501
+ "--fsdp_offload_params",
502
+ default="false",
503
+ type=str,
504
+ help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
505
+ )
506
+ fsdp_args.add_argument(
507
+ "--fsdp_min_num_params",
508
+ type=int,
509
+ default=1e8,
510
+ help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
511
+ )
512
+ fsdp_args.add_argument(
513
+ "--fsdp_sharding_strategy",
514
+ type=str,
515
+ default="FULL_SHARD",
516
+ help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
517
+ )
518
+ fsdp_args.add_argument(
519
+ "--fsdp_auto_wrap_policy",
520
+ type=str,
521
+ default=None,
522
+ help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
523
+ )
524
+ fsdp_args.add_argument(
525
+ "--fsdp_transformer_layer_cls_to_wrap",
526
+ default=None,
527
+ type=str,
528
+ help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... "
529
+ "(useful only when `use_fsdp` flag is passed).",
530
+ )
531
+ fsdp_args.add_argument(
532
+ "--fsdp_backward_prefetch_policy",
533
+ default=None,
534
+ type=str,
535
+ help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use `fsdp_backward_prefetch` instead.",
536
+ )
537
+ fsdp_args.add_argument(
538
+ "--fsdp_backward_prefetch",
539
+ default=None,
540
+ type=str,
541
+ help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
542
+ )
543
+ fsdp_args.add_argument(
544
+ "--fsdp_state_dict_type",
545
+ default=None,
546
+ type=str,
547
+ help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
548
+ )
549
+ fsdp_args.add_argument(
550
+ "--fsdp_forward_prefetch",
551
+ default="false",
552
+ type=str,
553
+ help="If True, then FSDP explicitly prefetches the next upcoming "
554
+ "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).",
555
+ )
556
+ fsdp_args.add_argument(
557
+ "--fsdp_use_orig_params",
558
+ default="true",
559
+ type=str,
560
+ help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres."
561
+ " (useful only when `use_fsdp` flag is passed).",
562
+ )
563
+ fsdp_args.add_argument(
564
+ "--fsdp_cpu_ram_efficient_loading",
565
+ default="true",
566
+ type=str,
567
+ help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. "
568
+ "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. "
569
+ "(useful only when `use_fsdp` flag is passed).",
570
+ )
571
+ fsdp_args.add_argument(
572
+ "--fsdp_sync_module_states",
573
+ default="true",
574
+ type=str,
575
+ help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
576
+ " (useful only when `use_fsdp` flag is passed).",
577
+ )
578
+
579
+ # megatron_lm args
580
+ megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
581
+ megatron_lm_args.add_argument(
582
+ "--megatron_lm_tp_degree",
583
+ type=int,
584
+ default=1,
585
+ help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
586
+ )
587
+ megatron_lm_args.add_argument(
588
+ "--megatron_lm_pp_degree",
589
+ type=int,
590
+ default=1,
591
+ help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
592
+ )
593
+ megatron_lm_args.add_argument(
594
+ "--megatron_lm_num_micro_batches",
595
+ type=int,
596
+ default=None,
597
+ help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
598
+ )
599
+ megatron_lm_args.add_argument(
600
+ "--megatron_lm_sequence_parallelism",
601
+ default=None,
602
+ type=str,
603
+ help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
604
+ "(useful only when `use_megatron_lm` flag is passed).",
605
+ )
606
+ megatron_lm_args.add_argument(
607
+ "--megatron_lm_recompute_activations",
608
+ default=None,
609
+ type=str,
610
+ help="Decides Whether (true|false) to enable Selective Activation Recomputation. "
611
+ "(useful only when `use_megatron_lm` flag is passed).",
612
+ )
613
+ megatron_lm_args.add_argument(
614
+ "--megatron_lm_use_distributed_optimizer",
615
+ default=None,
616
+ type=str,
617
+ help="Decides Whether (true|false) to use distributed optimizer "
618
+ "which shards optimizer state and gradients across Data Pralellel (DP) ranks. "
619
+ "(useful only when `use_megatron_lm` flag is passed).",
620
+ )
621
+ megatron_lm_args.add_argument(
622
+ "--megatron_lm_gradient_clipping",
623
+ default=1.0,
624
+ type=float,
625
+ help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
626
+ "(useful only when `use_megatron_lm` flag is passed).",
627
+ )
628
+
629
+ # AWS arguments
630
+ aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
631
+ aws_args.add_argument(
632
+ "--aws_access_key_id",
633
+ type=str,
634
+ default=None,
635
+ help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job",
636
+ )
637
+ aws_args.add_argument(
638
+ "--aws_secret_access_key",
639
+ type=str,
640
+ default=None,
641
+ help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.",
642
+ )
643
+ parser.add_argument(
644
+ "--debug",
645
+ action="store_true",
646
+ help="Whether to print out the torch.distributed stack trace when something fails.",
647
+ )
648
+ parser.add_argument(
649
+ "training_script",
650
+ type=str,
651
+ help=(
652
+ "The full path to the script to be launched in parallel, followed by all the arguments for the training "
653
+ "script."
654
+ ),
655
+ )
656
+
657
+ # MPI arguments
658
+ mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
659
+ mpirun_args.add_argument(
660
+ "--mpirun_hostfile",
661
+ type=str,
662
+ default=None,
663
+ help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
664
+ "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
665
+ )
666
+ mpirun_args.add_argument(
667
+ "--mpirun_ccl",
668
+ type=int,
669
+ default=1,
670
+ help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
671
+ )
672
+
673
+ # Other arguments of the training scripts
674
+ parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
675
+
676
+ if subparsers is not None:
677
+ parser.set_defaults(func=launch_command)
678
+ return parser
679
+
680
+
681
+ def simple_launcher(args):
682
+ cmd, current_env = prepare_simple_launcher_cmd_env(args)
683
+
684
+ process = subprocess.Popen(cmd, env=current_env)
685
+ process.wait()
686
+ if process.returncode != 0:
687
+ if not args.quiet:
688
+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
689
+ else:
690
+ sys.exit(1)
691
+
692
+
693
+ def multi_gpu_launcher(args):
694
+ import torch.distributed.run as distrib_run
695
+
696
+ current_env = prepare_multi_gpu_env(args)
697
+ if not check_cuda_p2p_ib_support():
698
+ message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
699
+ warn = False
700
+ if "NCCL_P2P_DISABLE" not in current_env:
701
+ current_env["NCCL_P2P_DISABLE"] = "1"
702
+ warn = True
703
+ if "NCCL_IB_DISABLE" not in current_env:
704
+ current_env["NCCL_IB_DISABLE"] = "1"
705
+ warn = True
706
+ if warn:
707
+ logger.warning(message)
708
+
709
+ debug = getattr(args, "debug", False)
710
+ args = _filter_args(
711
+ args,
712
+ distrib_run.get_args_parser(),
713
+ ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
714
+ )
715
+
716
+ with patch_environment(**current_env):
717
+ try:
718
+ distrib_run.run(args)
719
+ except Exception:
720
+ if is_rich_available() and debug:
721
+ console = get_console()
722
+ console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
723
+ console.print_exception(suppress=[__file__], show_locals=False)
724
+ else:
725
+ raise
726
+
727
+
728
+ def deepspeed_launcher(args):
729
+ import torch.distributed.run as distrib_run
730
+
731
+ if not is_deepspeed_available():
732
+ raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
733
+ else:
734
+ from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
735
+
736
+ cmd, current_env = prepare_deepspeed_cmd_env(args)
737
+ if not check_cuda_p2p_ib_support():
738
+ message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
739
+ warn = False
740
+ if "NCCL_P2P_DISABLE" not in current_env:
741
+ current_env["NCCL_P2P_DISABLE"] = "1"
742
+ warn = True
743
+ if "NCCL_IB_DISABLE" not in current_env:
744
+ current_env["NCCL_IB_DISABLE"] = "1"
745
+ warn = True
746
+ if warn:
747
+ logger.warning(message)
748
+
749
+ if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
750
+ with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
751
+ valid_env_items = convert_dict_to_env_variables(current_env)
752
+ if len(valid_env_items) > 1:
753
+ f.writelines(valid_env_items)
754
+
755
+ process = subprocess.Popen(cmd, env=current_env)
756
+ process.wait()
757
+ if process.returncode != 0:
758
+ if not args.quiet:
759
+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
760
+ else:
761
+ sys.exit(1)
762
+ else:
763
+ debug = getattr(args, "debug", False)
764
+ args = _filter_args(
765
+ args,
766
+ distrib_run.get_args_parser(),
767
+ ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
768
+ )
769
+ with patch_environment(**current_env):
770
+ try:
771
+ distrib_run.run(args)
772
+ except Exception:
773
+ if is_rich_available() and debug:
774
+ console = get_console()
775
+ console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
776
+ console.print_exception(suppress=[__file__], show_locals=False)
777
+ else:
778
+ raise
779
+
780
+
781
+ def tpu_launcher(args):
782
+ import torch_xla.distributed.xla_multiprocessing as xmp
783
+
784
+ if args.no_python:
785
+ raise ValueError("--no_python cannot be used with TPU launcher")
786
+
787
+ args, current_env = prepare_tpu(args, {})
788
+
789
+ if args.module:
790
+ mod_name = args.training_script
791
+ else:
792
+ # Import training_script as a module
793
+ script_path = Path(args.training_script)
794
+ sys.path.append(str(script_path.parent.resolve()))
795
+ mod_name = script_path.stem
796
+
797
+ mod = importlib.import_module(mod_name)
798
+ if not hasattr(mod, args.main_training_function):
799
+ raise ValueError(
800
+ f"Your training script should have a function named {args.main_training_function}, or you should pass a "
801
+ "different value to `--main_training_function`."
802
+ )
803
+
804
+ # Patch sys.argv
805
+ sys.argv = [mod.__file__] + args.training_script_args
806
+
807
+ main_function = getattr(mod, args.main_training_function)
808
+ with patch_environment(**current_env):
809
+ xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
810
+
811
+
812
+ def tpu_pod_launcher(args):
813
+ from torch_xla.distributed import xla_dist
814
+
815
+ current_env = {}
816
+ args, current_env = prepare_tpu(args, current_env, True)
817
+ debug = getattr(args, "debug", False)
818
+
819
+ training_script = args.training_script
820
+ training_script_args = args.training_script_args
821
+ new_args = _filter_args(
822
+ args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
823
+ )
824
+
825
+ if args.tpu_use_sudo:
826
+ new_cmd = ["sudo"]
827
+ else:
828
+ new_cmd = []
829
+
830
+ new_cmd += [
831
+ "accelerate-launch",
832
+ "--tpu",
833
+ "--no_tpu_cluster",
834
+ "--num_machines",
835
+ "1",
836
+ "--mixed_precision",
837
+ "no",
838
+ "--dynamo_backend",
839
+ "no",
840
+ "--num_processes",
841
+ str(args.num_processes),
842
+ "--main_training_function",
843
+ str(args.main_training_function),
844
+ training_script,
845
+ ] + training_script_args
846
+
847
+ new_args.positional = new_cmd
848
+ bad_flags = ""
849
+ for arg in vars(new_args):
850
+ if arg.startswith("docker_"):
851
+ value = getattr(new_args, arg)
852
+ if value != "" and value is not None:
853
+ bad_flags += f'{arg}="{value}"\n'
854
+ if bad_flags != "":
855
+ raise ValueError(
856
+ f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
857
+ )
858
+ new_args.env = [f"{k}={v}" for k, v in current_env.items()]
859
+ new_args.env.append("ACCELERATE_IN_TPU_POD=1")
860
+ try:
861
+ xla_dist.resolve_and_execute(new_args)
862
+ except Exception:
863
+ if is_rich_available() and debug:
864
+ console = get_console()
865
+ console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
866
+ console.print_exception(suppress=[__file__], show_locals=False)
867
+ else:
868
+ raise
869
+
870
+
871
+ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
872
+ if not is_sagemaker_available():
873
+ raise ImportError(
874
+ "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
875
+ )
876
+ if args.module or args.no_python:
877
+ raise ValueError(
878
+ "SageMaker requires a python training script file and cannot be used with --module or --no_python"
879
+ )
880
+
881
+ from sagemaker.huggingface import HuggingFace
882
+
883
+ args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
884
+
885
+ huggingface_estimator = HuggingFace(**args)
886
+
887
+ huggingface_estimator.fit(inputs=sagemaker_inputs)
888
+ print(f"You can find your model data at: {huggingface_estimator.model_data}")
889
+
890
+
891
+ def _validate_launch_command(args):
892
+ # Sanity checks
893
+ if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
894
+ raise ValueError(
895
+ "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
896
+ )
897
+ if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
898
+ raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
899
+
900
+ defaults = None
901
+ warned = []
902
+ mp_from_config_flag = False
903
+ # Get the default from the config file.
904
+ if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
905
+ defaults = load_config_from_file(args.config_file)
906
+ if (
907
+ not args.multi_gpu
908
+ and not args.tpu
909
+ and not args.tpu_use_cluster
910
+ and not args.use_deepspeed
911
+ and not args.use_fsdp
912
+ and not args.use_megatron_lm
913
+ ):
914
+ args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
915
+ args.multi_gpu = (
916
+ True
917
+ if defaults.distributed_type
918
+ in (
919
+ DistributedType.MULTI_GPU,
920
+ DistributedType.MULTI_NPU,
921
+ DistributedType.MULTI_MLU,
922
+ DistributedType.MULTI_XPU,
923
+ )
924
+ else False
925
+ )
926
+ args.tpu = defaults.distributed_type == DistributedType.XLA
927
+ args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
928
+ args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
929
+ args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
930
+ if args.gpu_ids is None:
931
+ if defaults.gpu_ids is not None:
932
+ args.gpu_ids = defaults.gpu_ids
933
+ else:
934
+ args.gpu_ids = "all"
935
+
936
+ if args.multi_gpu and args.num_machines is None:
937
+ args.num_machines = defaults.num_machines
938
+
939
+ if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
940
+ raise ValueError(
941
+ "Less than two GPU ids were configured and tried to run on on multiple GPUs. "
942
+ "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
943
+ )
944
+ if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
945
+ # Update args with the defaults
946
+ for name, attr in defaults.__dict__.items():
947
+ if isinstance(attr, dict):
948
+ for k in defaults.deepspeed_config:
949
+ setattr(args, k, defaults.deepspeed_config[k])
950
+ for k in defaults.fsdp_config:
951
+ arg_to_set = k
952
+ if "fsdp" not in arg_to_set:
953
+ arg_to_set = "fsdp_" + arg_to_set
954
+ setattr(args, arg_to_set, defaults.fsdp_config[k])
955
+ for k in defaults.megatron_lm_config:
956
+ setattr(args, k, defaults.megatron_lm_config[k])
957
+ for k in defaults.dynamo_config:
958
+ setattr(args, k, defaults.dynamo_config[k])
959
+ for k in defaults.ipex_config:
960
+ setattr(args, k, defaults.ipex_config[k])
961
+ for k in defaults.mpirun_config:
962
+ setattr(args, k, defaults.mpirun_config[k])
963
+ continue
964
+
965
+ # Those args are handled separately
966
+ if (
967
+ name not in ["compute_environment", "mixed_precision", "distributed_type"]
968
+ and getattr(args, name, None) is None
969
+ ):
970
+ setattr(args, name, attr)
971
+ if not args.debug:
972
+ args.debug = defaults.debug
973
+
974
+ if not args.mixed_precision:
975
+ if defaults.mixed_precision is None:
976
+ args.mixed_precision = "no"
977
+ else:
978
+ args.mixed_precision = defaults.mixed_precision
979
+ mp_from_config_flag = True
980
+ else:
981
+ if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
982
+ native_amp = is_torch_version(">=", "1.10")
983
+ else:
984
+ native_amp = is_bf16_available(True)
985
+ if (
986
+ args.mixed_precision == "bf16"
987
+ and not native_amp
988
+ and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
989
+ ):
990
+ raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
991
+
992
+ # Silently set the default here
993
+ if args.dynamo_backend is None:
994
+ args.dynamo_backend = "no"
995
+ else:
996
+ if args.num_processes is None:
997
+ if args.use_xpu and is_xpu_available():
998
+ args.num_processes = torch.xpu.device_count()
999
+ elif is_mlu_available():
1000
+ args.num_processes = torch.mlu.device_count()
1001
+ elif is_npu_available():
1002
+ args.num_processes = torch.npu.device_count()
1003
+ else:
1004
+ args.num_processes = torch.cuda.device_count()
1005
+ warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
1006
+ if args.debug is None:
1007
+ args.debug = False
1008
+ if not args.multi_gpu and (
1009
+ (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
1010
+ or (is_mlu_available() and torch.mlu.device_count() > 1)
1011
+ or (is_npu_available() and torch.npu.device_count() > 1)
1012
+ or (torch.cuda.device_count() > 1)
1013
+ ):
1014
+ warned.append(
1015
+ "\t\tMore than one GPU was found, enabling multi-GPU training.\n"
1016
+ "\t\tIf this was unintended please pass in `--num_processes=1`."
1017
+ )
1018
+ args.multi_gpu = True
1019
+ if args.num_machines is None:
1020
+ warned.append("\t`--num_machines` was set to a value of `1`")
1021
+ args.num_machines = 1
1022
+ if args.mixed_precision is None:
1023
+ warned.append("\t`--mixed_precision` was set to a value of `'no'`")
1024
+ args.mixed_precision = "no"
1025
+ if not hasattr(args, "use_cpu"):
1026
+ args.use_cpu = args.cpu
1027
+ if args.dynamo_backend is None:
1028
+ warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
1029
+ args.dynamo_backend = "no"
1030
+ if args.debug:
1031
+ logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")
1032
+
1033
+ is_aws_env_disabled = defaults is None or (
1034
+ defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
1035
+ )
1036
+ if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
1037
+ args.num_cpu_threads_per_process = 1
1038
+ if args.use_cpu and args.num_processes >= 1:
1039
+ local_size = get_int_from_env(
1040
+ ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
1041
+ )
1042
+ threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
1043
+ if threads_per_process > 1:
1044
+ args.num_cpu_threads_per_process = threads_per_process
1045
+ warned.append(
1046
+ f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
1047
+ )
1048
+
1049
+ if any(warned):
1050
+ message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
1051
+ message += "\n".join(warned)
1052
+ message += (
1053
+ "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
1054
+ )
1055
+ logger.warning(message)
1056
+ return args, defaults, mp_from_config_flag
1057
+
1058
+
1059
+ def launch_command(args):
1060
+ args, defaults, mp_from_config_flag = _validate_launch_command(args)
1061
+ # Use the proper launcher
1062
+ if args.use_deepspeed and not args.cpu:
1063
+ args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []
1064
+ if mp_from_config_flag:
1065
+ args.deepspeed_fields_from_accelerate_config.append("mixed_precision")
1066
+ args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config)
1067
+ deepspeed_launcher(args)
1068
+ elif args.use_fsdp and not args.cpu:
1069
+ multi_gpu_launcher(args)
1070
+ elif args.use_megatron_lm and not args.cpu:
1071
+ multi_gpu_launcher(args)
1072
+ elif args.multi_gpu and not args.cpu:
1073
+ multi_gpu_launcher(args)
1074
+ elif args.tpu and not args.cpu:
1075
+ if args.tpu_use_cluster:
1076
+ tpu_pod_launcher(args)
1077
+ else:
1078
+ tpu_launcher(args)
1079
+ elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
1080
+ sagemaker_launcher(defaults, args)
1081
+ else:
1082
+ simple_launcher(args)
1083
+
1084
+
1085
+ def main():
1086
+ parser = launch_command_parser()
1087
+ args = parser.parse_args()
1088
+ launch_command(args)
1089
+
1090
+
1091
+ if __name__ == "__main__":
1092
+ main()
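`launch_command` above is the single dispatch point for `accelerate launch`: it validates the parsed arguments, fills in defaults from the saved config, and then hands off to the DeepSpeed, multi-GPU, TPU, SageMaker, or simple launcher. A minimal sketch of driving it programmatically; the script name `train.py` and the flag values are placeholders, not part of the diff above:

from accelerate.commands.launch import launch_command, launch_command_parser

parser = launch_command_parser()
# Equivalent to: accelerate launch --num_processes 2 train.py --epochs 3
args = parser.parse_args(["--num_processes", "2", "train.py", "--epochs", "3"])
# Validates the arguments, then picks the deepspeed/multi-GPU/TPU/simple launcher
# and spawns the training processes for the (placeholder) script.
launch_command(args)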
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from .selection_menu import BulletMenu
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc ADDED
Binary file (2.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc ADDED
Binary file (4.44 kB). View file
 
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py ADDED
@@ -0,0 +1,65 @@
1
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
17
+ """
18
+
19
+ import os
20
+ import sys
21
+ from contextlib import contextmanager
22
+
23
+
24
+ # Windows only
25
+ if os.name == "nt":
26
+ import ctypes
27
+ import msvcrt # noqa
28
+
29
+ class CursorInfo(ctypes.Structure):
30
+ # _fields is a specific attr expected by ctypes
31
+ _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
32
+
33
+
34
+ def hide_cursor():
35
+ if os.name == "nt":
36
+ ci = CursorInfo()
37
+ handle = ctypes.windll.kernel32.GetStdHandle(-11)
38
+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
39
+ ci.visible = False
40
+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
41
+ elif os.name == "posix":
42
+ sys.stdout.write("\033[?25l")
43
+ sys.stdout.flush()
44
+
45
+
46
+ def show_cursor():
47
+ if os.name == "nt":
48
+ ci = CursorInfo()
49
+ handle = ctypes.windll.kernel32.GetStdHandle(-11)
50
+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
51
+ ci.visible = True
52
+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
53
+ elif os.name == "posix":
54
+ sys.stdout.write("\033[?25h")
55
+ sys.stdout.flush()
56
+
57
+
58
+ @contextmanager
59
+ def hide():
60
+ "Context manager to hide the terminal cursor"
61
+ try:
62
+ hide_cursor()
63
+ yield
64
+ finally:
65
+ show_cursor()
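The `hide()` context manager pairs `hide_cursor`/`show_cursor` in a `try`/`finally`, so the cursor is restored even if the wrapped code raises. A minimal usage sketch; the sleep is only a stand-in for redrawing a menu:

import time

from accelerate.commands.menu.cursor import hide

with hide():       # cursor disappears here
    time.sleep(2)  # stand-in for drawing and redrawing a menu
# on exit the cursor is shown again, even after an exception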
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ A variety of helper functions and constants when dealing with terminal menu choices, based on
17
+ https://github.com/bchao1/bullet
18
+ """
19
+
20
+ import enum
21
+ import shutil
22
+ import sys
23
+
24
+
25
+ TERMINAL_WIDTH, _ = shutil.get_terminal_size()
26
+
27
+ CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
28
+
29
+
30
+ class Direction(enum.Enum):
31
+ UP = 0
32
+ DOWN = 1
33
+
34
+
35
+ def forceWrite(content, end=""):
36
+ sys.stdout.write(str(content) + end)
37
+ sys.stdout.flush()
38
+
39
+
40
+ def writeColor(content, color, end=""):
41
+ forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
42
+
43
+
44
+ def reset_cursor():
45
+ forceWrite("\r")
46
+
47
+
48
+ def move_cursor(num_lines: int, direction: str):
49
+ forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
50
+
51
+
52
+ def clear_line():
53
+ forceWrite(" " * TERMINAL_WIDTH)
54
+ reset_cursor()
55
+
56
+
57
+ def linebreak():
58
+ reset_cursor()
59
+ forceWrite("-" * TERMINAL_WIDTH)
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/input.py ADDED
@@ -0,0 +1,86 @@
1
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This file contains utilities for handling input from the user and registering specific keys to specific functions,
17
+ based on https://github.com/bchao1/bullet
18
+ """
19
+
20
+ from typing import List
21
+
22
+ from .keymap import KEYMAP, get_character
23
+
24
+
25
+ def mark(key: str):
26
+ """
27
+ Mark the function with the key code so it can be handled in the register
28
+ """
29
+
30
+ def decorator(func):
31
+ handle = getattr(func, "handle_key", [])
32
+ handle += [key]
33
+ func.handle_key = handle
34
+ return func
35
+
36
+ return decorator
37
+
38
+
39
+ def mark_multiple(*keys: List[str]):
40
+ """
41
+ Mark the function with the key codes so it can be handled in the register
42
+ """
43
+
44
+ def decorator(func):
45
+ handle = getattr(func, "handle_key", [])
46
+ handle += keys
47
+ func.handle_key = handle
48
+ return func
49
+
50
+ return decorator
51
+
52
+
53
+ class KeyHandler(type):
54
+ """
55
+ Metaclass that adds the key handlers to the class
56
+ """
57
+
58
+ def __new__(cls, name, bases, attrs):
59
+ new_cls = super().__new__(cls, name, bases, attrs)
60
+ if not hasattr(new_cls, "key_handler"):
61
+ new_cls.key_handler = {}
62
+ new_cls.handle_input = KeyHandler.handle_input
63
+
64
+ for value in attrs.values():
65
+ handled_keys = getattr(value, "handle_key", [])
66
+ for key in handled_keys:
67
+ new_cls.key_handler[key] = value
68
+ return new_cls
69
+
70
+ @staticmethod
71
+ def handle_input(cls):
72
+ "Finds and returns the selected character if it exists in the handler"
73
+ char = get_character()
74
+ if char != KEYMAP["undefined"]:
75
+ char = ord(char)
76
+ handler = cls.key_handler.get(char)
77
+ if handler:
78
+ cls.current_selection = char
79
+ return handler(cls)
80
+ else:
81
+ return None
82
+
83
+
84
+ def register(cls):
85
+ """Adds KeyHandler metaclass to the class"""
86
+ return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
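`register` applies the `KeyHandler` metaclass to a class, collecting every method decorated with `mark`/`mark_multiple` into `key_handler`, keyed by key code; `handle_input` then reads one keypress and dispatches to the matching method. A hedged sketch of a tiny handler class (not part of this file) built with the same decorators:

from accelerate.commands.menu import input
from accelerate.commands.menu.keymap import KEYMAP


@input.register
class TinyPrompt:
    "Hypothetical example: confirm on Enter, quit on 'q'."

    @input.mark(KEYMAP["newline"])
    def confirm(self):
        return "confirmed"

    @input.mark(ord("q"))
    def quit(self):
        return "quit"


# TinyPrompt().handle_input() blocks for one keypress and dispatches to the method
# registered for that key code, returning None for unhandled keys.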
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py ADDED
@@ -0,0 +1,133 @@
1
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
17
+ """
18
+
19
+ import os
20
+ import string
21
+ import sys
22
+
23
+
24
+ ARROW_KEY_FLAG = 1 << 8
25
+
26
+ KEYMAP = {
27
+ "tab": ord("\t"),
28
+ "newline": ord("\r"),
29
+ "esc": 27,
30
+ "up": 65 + ARROW_KEY_FLAG,
31
+ "down": 66 + ARROW_KEY_FLAG,
32
+ "right": 67 + ARROW_KEY_FLAG,
33
+ "left": 68 + ARROW_KEY_FLAG,
34
+ "mod_int": 91,
35
+ "undefined": sys.maxsize,
36
+ "interrupt": 3,
37
+ "insert": 50,
38
+ "delete": 51,
39
+ "pg_up": 53,
40
+ "pg_down": 54,
41
+ }
42
+
43
+ KEYMAP["arrow_begin"] = KEYMAP["up"]
44
+ KEYMAP["arrow_end"] = KEYMAP["left"]
45
+
46
+ if sys.platform == "win32":
47
+ WIN_CH_BUFFER = []
48
+ WIN_KEYMAP = {
49
+ b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
50
+ b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
51
+ b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
52
+ b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
53
+ b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
54
+ b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
55
+ b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
56
+ b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
57
+ }
58
+
59
+ for i in range(10):
60
+ KEYMAP[str(i)] = ord(str(i))
61
+
62
+
63
+ def get_raw_chars():
64
+ "Gets raw characters from inputs"
65
+ if os.name == "nt":
66
+ import msvcrt
67
+
68
+ encoding = "mbcs"
69
+ # Flush the keyboard buffer
70
+ while msvcrt.kbhit():
71
+ msvcrt.getch()
72
+ if len(WIN_CH_BUFFER) == 0:
73
+ # Read the keystroke
74
+ ch = msvcrt.getch()
75
+
76
+ # If it is a prefix char, get second part
77
+ if ch in (b"\x00", b"\xe0"):
78
+ ch2 = ch + msvcrt.getch()
79
+ # Translate actual Win chars to bullet char types
80
+ try:
81
+ chx = chr(WIN_KEYMAP[ch2])
82
+ WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
83
+ WIN_CH_BUFFER.append(chx)
84
+ if ord(chx) in (
85
+ KEYMAP["insert"] - 1 << 9,
86
+ KEYMAP["delete"] - 1 << 9,
87
+ KEYMAP["pg_up"] - 1 << 9,
88
+ KEYMAP["pg_down"] - 1 << 9,
89
+ ):
90
+ WIN_CH_BUFFER.append(chr(126))
91
+ ch = chr(KEYMAP["esc"])
92
+ except KeyError:
93
+ ch = ch2[1]
94
+ else:
95
+ ch = ch.decode(encoding)
96
+ else:
97
+ ch = WIN_CH_BUFFER.pop(0)
98
+ elif os.name == "posix":
99
+ import termios
100
+ import tty
101
+
102
+ fd = sys.stdin.fileno()
103
+ old_settings = termios.tcgetattr(fd)
104
+ try:
105
+ tty.setraw(fd)
106
+ ch = sys.stdin.read(1)
107
+ finally:
108
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
109
+ return ch
110
+
111
+
112
+ def get_character():
113
+ "Gets a character from the keyboard and returns the key code"
114
+ char = get_raw_chars()
115
+ if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
116
+ return char
117
+
118
+ elif ord(char) == KEYMAP["esc"]:
119
+ combo = get_raw_chars()
120
+ if ord(combo) == KEYMAP["mod_int"]:
121
+ key = get_raw_chars()
122
+ if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
123
+ return chr(ord(key) + ARROW_KEY_FLAG)
124
+ else:
125
+ return KEYMAP["undefined"]
126
+ else:
127
+ return get_raw_chars()
128
+
129
+ else:
130
+ if char in string.printable:
131
+ return char
132
+ else:
133
+ return KEYMAP["undefined"]
llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py ADDED
@@ -0,0 +1,144 @@
1
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Main driver for the selection menu, based on https://github.com/bchao1/bullet
17
+ """
18
+
19
+ import builtins
20
+ import sys
21
+
22
+ from ...utils.imports import _is_package_available
23
+ from . import cursor, input
24
+ from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
25
+ from .keymap import KEYMAP
26
+
27
+
28
+ in_colab = False
29
+ try:
30
+ in_colab = _is_package_available("google.colab")
31
+ except ModuleNotFoundError:
32
+ pass
33
+
34
+
35
+ @input.register
36
+ class BulletMenu:
37
+ """
38
+ A CLI menu to select a choice from a list of choices using the keyboard.
39
+ """
40
+
41
+ def __init__(self, prompt: str = None, choices: list = []):
42
+ self.position = 0
43
+ self.choices = choices
44
+ self.prompt = prompt
45
+ if sys.platform == "win32":
46
+ self.arrow_char = "*"
47
+ else:
48
+ self.arrow_char = "➔ "
49
+
50
+ def write_choice(self, index, end: str = ""):
51
+ if sys.platform != "win32":
52
+ writeColor(self.choices[index], 32, end)
53
+ else:
54
+ forceWrite(self.choices[index], end)
55
+
56
+ def print_choice(self, index: int):
57
+ "Prints the choice at the given index"
58
+ if index == self.position:
59
+ forceWrite(f" {self.arrow_char} ")
60
+ self.write_choice(index)
61
+ else:
62
+ forceWrite(f" {self.choices[index]}")
63
+ reset_cursor()
64
+
65
+ def move_direction(self, direction: Direction, num_spaces: int = 1):
66
+ "Should not be directly called, used to move a direction of either up or down"
67
+ old_position = self.position
68
+ if direction == Direction.DOWN:
69
+ if self.position + 1 >= len(self.choices):
70
+ return
71
+ self.position += num_spaces
72
+ else:
73
+ if self.position - 1 < 0:
74
+ return
75
+ self.position -= num_spaces
76
+ clear_line()
77
+ self.print_choice(old_position)
78
+ move_cursor(num_spaces, direction.name)
79
+ self.print_choice(self.position)
80
+
81
+ @input.mark(KEYMAP["up"])
82
+ def move_up(self):
83
+ self.move_direction(Direction.UP)
84
+
85
+ @input.mark(KEYMAP["down"])
86
+ def move_down(self):
87
+ self.move_direction(Direction.DOWN)
88
+
89
+ @input.mark(KEYMAP["newline"])
90
+ def select(self):
91
+ move_cursor(len(self.choices) - self.position, "DOWN")
92
+ return self.position
93
+
94
+ @input.mark(KEYMAP["interrupt"])
95
+ def interrupt(self):
96
+ move_cursor(len(self.choices) - self.position, "DOWN")
97
+ raise KeyboardInterrupt
98
+
99
+ @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
100
+ def select_row(self):
101
+ index = int(chr(self.current_selection))
102
+ movement = index - self.position
103
+ if index == self.position:
104
+ return
105
+ if index < len(self.choices):
106
+ if self.position > index:
107
+ self.move_direction(Direction.UP, -movement)
108
+ elif self.position < index:
109
+ self.move_direction(Direction.DOWN, movement)
110
+ else:
111
+ return
112
+ else:
113
+ return
114
+
115
+ def run(self, default_choice: int = 0):
116
+ "Start the menu and return the selected choice"
117
+ if self.prompt:
118
+ linebreak()
119
+ forceWrite(self.prompt, "\n")
120
+ if in_colab:
121
+ forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
122
+ else:
123
+ forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
124
+ self.position = default_choice
125
+ for i in range(len(self.choices)):
126
+ self.print_choice(i)
127
+ forceWrite("\n")
128
+ move_cursor(len(self.choices) - self.position, "UP")
129
+ with cursor.hide():
130
+ while True:
131
+ if in_colab:
132
+ try:
133
+ choice = int(builtins.input())
134
+ except ValueError:
135
+ choice = default_choice
136
+ else:
137
+ choice = self.handle_input()
138
+ if choice is not None:
139
+ reset_cursor()
140
+ for _ in range(len(self.choices) + 1):
141
+ move_cursor(1, "UP")
142
+ clear_line()
143
+ self.write_choice(choice, "\n")
144
+ return choice
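`BulletMenu` ties the cursor, keymap, and helper modules together: `run` prints the choices, moves the highlight with the arrow (or number) keys, and returns the selected index on Enter. A usage sketch with placeholder prompt and choices, similar to how the interactive `accelerate config` questionnaire asks its multiple-choice questions:

from accelerate.commands.menu import BulletMenu

menu = BulletMenu(
    prompt="Which compute environment are you running in?",  # placeholder prompt
    choices=["This machine", "AWS (Amazon SageMaker)"],
)
index = menu.run(default_choice=0)  # index picked with arrows/digits + Enter
print(f"Selected: {menu.choices[index]}")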
llmeval-env/lib/python3.10/site-packages/accelerate/commands/test.py ADDED
@@ -0,0 +1,65 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
20
+
21
+
22
+ def test_command_parser(subparsers=None):
23
+ if subparsers is not None:
24
+ parser = subparsers.add_parser("test")
25
+ else:
26
+ parser = argparse.ArgumentParser("Accelerate test command")
27
+
28
+ parser.add_argument(
29
+ "--config_file",
30
+ default=None,
31
+ help=(
32
+ "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
33
+ "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
34
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
35
+ "with 'huggingface'."
36
+ ),
37
+ )
38
+
39
+ if subparsers is not None:
40
+ parser.set_defaults(func=test_command)
41
+ return parser
42
+
43
+
44
+ def test_command(args):
45
+ script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
46
+
47
+ if args.config_file is None:
48
+ test_args = [script_name]
49
+ else:
50
+ test_args = f"--config_file={args.config_file} {script_name}".split()
51
+
52
+ cmd = ["accelerate-launch"] + test_args
53
+ result = execute_subprocess_async(cmd)
54
+ if result.returncode == 0:
55
+ print("Test is a success! You are ready for your distributed training!")
56
+
57
+
58
+ def main():
59
+ parser = test_command_parser()
60
+ args = parser.parse_args()
61
+ test_command(args)
62
+
63
+
64
+ if __name__ == "__main__":
65
+ main()
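`test_command` simply shells out to `accelerate-launch` with the bundled `test_script.py`, optionally forwarding a config file. A sketch of calling it programmatically; the config path is only illustrative:

from accelerate.commands.test import test_command, test_command_parser

parser = test_command_parser()
# Path is illustrative; omit --config_file to fall back to the default config location.
args = parser.parse_args(["--config_file", "/path/to/default_config.yaml"])
test_command(args)  # runs: accelerate-launch --config_file=... test_script.py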
llmeval-env/lib/python3.10/site-packages/accelerate/commands/tpu.py ADDED
@@ -0,0 +1,157 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+ import subprocess
20
+
21
+ from packaging.version import Version, parse
22
+
23
+ from accelerate.commands.config.config_args import default_config_file, load_config_from_file
24
+
25
+
26
+ _description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
27
+
28
+
29
+ def tpu_command_parser(subparsers=None):
30
+ if subparsers is not None:
31
+ parser = subparsers.add_parser("tpu-config", description=_description)
32
+ else:
33
+ parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
34
+ # Core arguments
35
+ config_args = parser.add_argument_group(
36
+ "Config Arguments", "Arguments that can be configured through `accelerate config`."
37
+ )
38
+ config_args.add_argument(
39
+ "--config_file",
40
+ type=str,
41
+ default=None,
42
+ help="Path to the config file to use for accelerate.",
43
+ )
44
+ config_args.add_argument(
45
+ "--tpu_name",
46
+ default=None,
47
+ help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
48
+ )
49
+ config_args.add_argument(
50
+ "--tpu_zone",
51
+ default=None,
52
+ help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
53
+ )
54
+ pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
55
+ pod_args.add_argument(
56
+ "--use_alpha",
57
+ action="store_true",
58
+ help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
59
+ )
60
+ pod_args.add_argument(
61
+ "--command_file",
62
+ default=None,
63
+ help="The path to the file containing the commands to run on the pod on startup.",
64
+ )
65
+ pod_args.add_argument(
66
+ "--command",
67
+ action="append",
68
+ nargs="+",
69
+ help="A command to run on the pod. Can be passed multiple times.",
70
+ )
71
+ pod_args.add_argument(
72
+ "--install_accelerate",
73
+ action="store_true",
74
+ help="Whether to install accelerate on the pod. Defaults to False.",
75
+ )
76
+ pod_args.add_argument(
77
+ "--accelerate_version",
78
+ default="latest",
79
+ help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
80
+ )
81
+ pod_args.add_argument(
82
+ "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
83
+ )
84
+
85
+ if subparsers is not None:
86
+ parser.set_defaults(func=tpu_command_launcher)
87
+ return parser
88
+
89
+
90
+ def tpu_command_launcher(args):
91
+ defaults = None
92
+
93
+ # Get the default from the config file if it exists.
94
+ if args.config_file is not None or os.path.isfile(default_config_file):
95
+ defaults = load_config_from_file(args.config_file)
96
+ if not args.command_file and defaults.command_file is not None and not args.command:
97
+ args.command_file = defaults.command_file
98
+ if not args.command and defaults.commands is not None:
99
+ args.command = defaults.commands
100
+ if not args.tpu_name:
101
+ args.tpu_name = defaults.tpu_name
102
+ if not args.tpu_zone:
103
+ args.tpu_zone = defaults.tpu_zone
104
+ if args.accelerate_version == "dev":
105
+ args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
106
+ elif args.accelerate_version == "latest":
107
+ args.accelerate_version = "accelerate -U"
108
+ elif isinstance(parse(args.accelerate_version), Version):
109
+ args.accelerate_version = f"accelerate=={args.accelerate_version}"
110
+
111
+ if not args.command_file and not args.command:
112
+ raise ValueError("You must specify either a command file or a command to run on the pod.")
113
+
114
+ if args.command_file:
115
+ with open(args.command_file) as f:
116
+ args.command = [f.read().splitlines()]
117
+
118
+ # To turn list of lists into list of strings
119
+ if isinstance(args.command[0], list):
120
+ args.command = [line for cmd in args.command for line in cmd]
121
+ # Default to the shared folder and install accelerate
122
+ new_cmd = ["cd /usr/share"]
123
+ if args.install_accelerate:
124
+ new_cmd += [f"pip install {args.accelerate_version}"]
125
+ new_cmd += args.command
126
+ args.command = "; ".join(new_cmd)
127
+
128
+ # Then send it to gcloud
129
+ # Eventually try to use google-api-core to do this instead of subprocess
130
+ cmd = ["gcloud"]
131
+ if args.use_alpha:
132
+ cmd += ["alpha"]
133
+ cmd += [
134
+ "compute",
135
+ "tpus",
136
+ "tpu-vm",
137
+ "ssh",
138
+ args.tpu_name,
139
+ "--zone",
140
+ args.tpu_zone,
141
+ "--command",
142
+ args.command,
143
+ "--worker",
144
+ "all",
145
+ ]
146
+ if args.debug:
147
+ print(f"Running {' '.join(cmd)}")
148
+ return
149
+ subprocess.run(cmd)
150
+ print("Successfully setup pod.")
151
+
152
+
153
+ def main():
154
+ parser = tpu_command_parser()
155
+ args = parser.parse_args()
156
+
157
+ tpu_command_launcher(args)
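Because `tpu_command_launcher` returns early when `--debug` is set, the assembled `gcloud compute tpus tpu-vm ssh ...` invocation can be previewed without touching a pod. A sketch with placeholder TPU details; it assumes the TPU name and zone are passed on the command line rather than read from a saved config:

from accelerate.commands.tpu import tpu_command_launcher, tpu_command_parser

parser = tpu_command_parser()
args = parser.parse_args([
    "--tpu_name", "my-tpu",         # placeholder TPU name
    "--tpu_zone", "us-central2-b",  # placeholder zone
    "--command", "echo hello",
    "--install_accelerate",
    "--debug",                      # print the gcloud command instead of running it
])
tpu_command_launcher(args)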
llmeval-env/lib/python3.10/site-packages/accelerate/commands/utils.py ADDED
@@ -0,0 +1,120 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+
17
+
18
+ class _StoreAction(argparse.Action):
19
+ """
20
+ Custom action that allows for `-` or `_` to be passed in for an argument.
21
+ """
22
+
23
+ def __init__(self, *args, **kwargs):
24
+ super().__init__(*args, **kwargs)
25
+ new_option_strings = []
26
+ for option_string in self.option_strings:
27
+ new_option_strings.append(option_string)
28
+ if "_" in option_string[2:]:
29
+ # Add `-` version to the option string
30
+ new_option_strings.append(option_string.replace("_", "-"))
31
+ self.option_strings = new_option_strings
32
+
33
+ def __call__(self, parser, namespace, values, option_string=None):
34
+ setattr(namespace, self.dest, values)
35
+
36
+
37
+ class _StoreConstAction(_StoreAction):
38
+ """
39
+ Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
40
+ """
41
+
42
+ def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
43
+ super().__init__(
44
+ option_strings=option_strings,
45
+ dest=dest,
46
+ nargs=0,
47
+ const=const,
48
+ default=default,
49
+ required=required,
50
+ help=help,
51
+ )
52
+
53
+ def __call__(self, parser, namespace, values, option_string=None):
54
+ setattr(namespace, self.dest, self.const)
55
+
56
+
57
+ class _StoreTrueAction(_StoreConstAction):
58
+ """
59
+ Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
60
+ """
61
+
62
+ def __init__(
63
+ self,
64
+ option_strings,
65
+ dest,
66
+ default=None,
67
+ required=False,
68
+ help=None,
69
+ ):
70
+ super().__init__(
71
+ option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
72
+ )
73
+
74
+
75
+ class CustomArgumentGroup(argparse._ArgumentGroup):
76
+ """
77
+ Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
78
+ when applicable.
79
+ """
80
+
81
+ def _add_action(self, action):
82
+ args = vars(action)
83
+ if isinstance(action, argparse._StoreTrueAction):
84
+ action = _StoreTrueAction(
85
+ args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
86
+ )
87
+ elif isinstance(action, argparse._StoreConstAction):
88
+ action = _StoreConstAction(
89
+ args["option_strings"],
90
+ args["dest"],
91
+ args["const"],
92
+ args["default"],
93
+ args["required"],
94
+ args["help"],
95
+ )
96
+ elif isinstance(action, argparse._StoreAction):
97
+ action = _StoreAction(**args)
98
+ action = super()._add_action(action)
99
+ return action
100
+
101
+
102
+ class CustomArgumentParser(argparse.ArgumentParser):
103
+ """
104
+ Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
105
+ when applicable.
106
+ """
107
+
108
+ def add_argument(self, *args, **kwargs):
109
+ if "action" in kwargs:
110
+ # Translate action -> class
111
+ if kwargs["action"] == "store_true":
112
+ kwargs["action"] = _StoreTrueAction
113
+ else:
114
+ kwargs["action"] = _StoreAction
115
+ super().add_argument(*args, **kwargs)
116
+
117
+ def add_argument_group(self, *args, **kwargs):
118
+ group = CustomArgumentGroup(self, *args, **kwargs)
119
+ self._action_groups.append(group)
120
+ return group
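The custom actions register a `-` spelled alias for every `_` spelled option string, so both spellings resolve to the same destination when arguments are added through a `CustomArgumentGroup` (which is how the launch parser adds them). A quick sketch:

from accelerate.commands.utils import CustomArgumentParser

parser = CustomArgumentParser("demo")
group = parser.add_argument_group("Demo Arguments")  # returns a CustomArgumentGroup
group.add_argument("--num_machines", type=int, default=1)
group.add_argument("--use_cpu", action="store_true")

# Both spellings parse to the same destinations:
print(parser.parse_args(["--num-machines", "2", "--use-cpu"]))
print(parser.parse_args(["--num_machines", "2", "--use_cpu"]))
# -> Namespace(num_machines=2, use_cpu=True) in both cases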
llmeval-env/lib/python3.10/site-packages/accelerate/data_loader.py ADDED
@@ -0,0 +1,1149 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from contextlib import suppress
17
+ from typing import Callable, List, Optional, Union
18
+
19
+ import torch
20
+ from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
21
+
22
+ from .logging import get_logger
23
+ from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available
24
+ from .utils import (
25
+ RNGType,
26
+ broadcast,
27
+ broadcast_object_list,
28
+ concatenate,
29
+ find_batch_size,
30
+ get_data_structure,
31
+ initialize_tensors,
32
+ is_torch_version,
33
+ send_to_device,
34
+ slice_tensors,
35
+ synchronize_rng_states,
36
+ )
37
+
38
+
39
+ logger = get_logger(__name__)
40
+
41
+ # kwargs of the DataLoader in min version 1.4.0.
42
+ _PYTORCH_DATALOADER_KWARGS = {
43
+ "batch_size": 1,
44
+ "shuffle": False,
45
+ "sampler": None,
46
+ "batch_sampler": None,
47
+ "num_workers": 0,
48
+ "collate_fn": None,
49
+ "pin_memory": False,
50
+ "drop_last": False,
51
+ "timeout": 0,
52
+ "worker_init_fn": None,
53
+ "multiprocessing_context": None,
54
+ "generator": None,
55
+ "prefetch_factor": 2,
56
+ "persistent_workers": False,
57
+ }
58
+
59
+ # kwargs added after by version
60
+ _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}
61
+
62
+ for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
63
+ if is_torch_version(">=", v):
64
+ _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
65
+
66
+
67
+ class SeedableRandomSampler(RandomSampler):
68
+ """
69
+ Same as a random sampler, except that in `__iter__` a seed can be used.
70
+
71
+ Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
72
+ and be fully reproducible across multiple iterations.
73
+
74
+ If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
75
+ (stored in `self.epoch`).
76
+ """
77
+
78
+ def __init__(self, *args, **kwargs):
79
+ super().__init__(*args, **kwargs)
80
+ self.epoch = 0
81
+ self.initial_seed = torch.random.initial_seed()
82
+
83
+ def __iter__(self):
84
+ if self.generator is None:
85
+ self.generator = torch.Generator()
86
+ self.generator.manual_seed(self.initial_seed)
87
+
88
+ # Allow `self.epoch` to modify the seed of the generator
89
+ seed = self.epoch + self.initial_seed
90
+ # print("Setting seed at epoch", self.epoch, seed)
91
+ self.generator.manual_seed(seed)
92
+ yield from super().__iter__()
93
+ self.set_epoch(self.epoch + 1)
94
+
95
+ def set_epoch(self, epoch: int):
96
+ "Sets the current iteration of the sampler."
97
+ self.epoch = epoch
98
+
99
+
100
+ class BatchSamplerShard(BatchSampler):
101
+ """
102
+ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
103
+ always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
104
+ Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
105
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
106
+
107
+ Args:
108
+ batch_sampler (`torch.utils.data.sampler.BatchSampler`):
109
+ The batch sampler to split in several shards.
110
+ num_processes (`int`, *optional*, defaults to 1):
111
+ The number of processes running concurrently.
112
+ process_index (`int`, *optional*, defaults to 0):
113
+ The index of the current process.
114
+ split_batches (`bool`, *optional*, defaults to `False`):
115
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
116
+ yielding different full batches on each process.
117
+
118
+ On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:
119
+
120
+ - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if
121
+ this argument is set to `False`.
122
+ - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`
123
+ then `[6, 7]` if this argument is set to `True`.
124
+ even_batches (`bool`, *optional*, defaults to `True`):
125
+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round
126
+ multiple of (original batch size / number of processes).
127
+
128
+ <Tip warning={true}>
129
+
130
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
131
+ equal to `False`
132
+
133
+ </Tip>"""
134
+
135
+ def __init__(
136
+ self,
137
+ batch_sampler: BatchSampler,
138
+ num_processes: int = 1,
139
+ process_index: int = 0,
140
+ split_batches: bool = False,
141
+ even_batches: bool = True,
142
+ ):
143
+ if split_batches and batch_sampler.batch_size % num_processes != 0:
144
+ raise ValueError(
145
+ f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) "
146
+ f"needs to be a round multiple of the number of processes ({num_processes})."
147
+ )
148
+ self.batch_sampler = batch_sampler
149
+ self.num_processes = num_processes
150
+ self.process_index = process_index
151
+ self.split_batches = split_batches
152
+ self.even_batches = even_batches
153
+ self.batch_size = getattr(batch_sampler, "batch_size", None)
154
+ self.drop_last = getattr(batch_sampler, "drop_last", False)
155
+ if self.batch_size is None and self.even_batches:
156
+ raise ValueError(
157
+ "You need to use `even_batches=False` when the batch sampler has no batch size. If you "
158
+ "are not calling this method directly, set `accelerator.even_batches=False` instead."
159
+ )
160
+
161
+ @property
162
+ def total_length(self):
163
+ return len(self.batch_sampler)
164
+
165
+ def __len__(self):
166
+ if self.split_batches:
167
+ # Split batches does not change the length of the batch sampler
168
+ return len(self.batch_sampler)
169
+ if len(self.batch_sampler) % self.num_processes == 0:
170
+ # If the length is a round multiple of the number of processes, it's easy.
171
+ return len(self.batch_sampler) // self.num_processes
172
+ length = len(self.batch_sampler) // self.num_processes
173
+ if self.drop_last:
174
+ # Same if we drop the remainder.
175
+ return length
176
+ elif self.even_batches:
177
+ # When even_batches is enabled we always get +1
178
+ return length + 1
179
+ else:
180
+ # Otherwise it depends on the process index.
181
+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
182
+
183
+ def __iter__(self):
184
+ return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
185
+
186
+ def _iter_with_split(self):
187
+ initial_data = []
188
+ batch_length = self.batch_sampler.batch_size // self.num_processes
189
+ for idx, batch in enumerate(self.batch_sampler):
190
+ if idx == 0:
191
+ initial_data = batch
192
+ if len(batch) == self.batch_size:
193
+ # If the batch is full, we yield the part of it this process is responsible for.
194
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
195
+
196
+ # If drop_last is True or the last batch was full, iteration is over, otherwise...
197
+ if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
198
+ if not self.even_batches:
199
+ if len(batch) > batch_length * self.process_index:
200
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
201
+ else:
202
+ # For degenerate cases where the dataset has less than num_process * batch_size samples
203
+ while len(initial_data) < self.batch_size:
204
+ initial_data += initial_data
205
+ batch = batch + initial_data
206
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
207
+
208
+ def _iter_with_no_split(self):
209
+ initial_data = []
210
+ batch_to_yield = []
211
+ for idx, batch in enumerate(self.batch_sampler):
212
+ # We gather the initial indices in case we need to circle back at the end.
213
+ if not self.drop_last and idx < self.num_processes:
214
+ initial_data += batch
215
+ # We identify the batch to yield but wait until we are sure every process gets a full batch before actually
216
+ # yielding it.
217
+ if idx % self.num_processes == self.process_index:
218
+ batch_to_yield = batch
219
+ if idx % self.num_processes == self.num_processes - 1 and (
220
+ self.batch_size is None or len(batch) == self.batch_size
221
+ ):
222
+ yield batch_to_yield
223
+ batch_to_yield = []
224
+
225
+ # If drop_last is True, iteration is over, otherwise...
226
+ if not self.drop_last and len(initial_data) > 0:
227
+ if not self.even_batches:
228
+ if len(batch_to_yield) > 0:
229
+ yield batch_to_yield
230
+ else:
231
+ # ... we yield the complete batch we had saved before if it has the proper length
232
+ if len(batch_to_yield) == self.batch_size:
233
+ yield batch_to_yield
234
+
235
+ # For degenerate cases where the dataset has less than num_process * batch_size samples
236
+ while len(initial_data) < self.num_processes * self.batch_size:
237
+ initial_data += initial_data
238
+
239
+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
240
+ if len(batch) == self.batch_size:
241
+ batch = []
242
+ idx += 1
243
+
244
+ # Make sure we yield a multiple of self.num_processes batches
245
+ cycle_index = 0
246
+ while idx % self.num_processes != 0 or len(batch) > 0:
247
+ end_index = cycle_index + self.batch_size - len(batch)
248
+ batch += initial_data[cycle_index:end_index]
249
+ if idx % self.num_processes == self.process_index:
250
+ yield batch
251
+ cycle_index = end_index
252
+ batch = []
253
+ idx += 1
254
+
255
+
256
+ class IterableDatasetShard(IterableDataset):
257
+ """
258
+ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
259
+ always yield a number of samples that is a round multiple of the actual batch size (depending of the value of
260
+ `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
261
+ `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
262
+ be too small or loop with indices from the beginning.
263
+
264
+ Args:
265
+ dataset (`torch.utils.data.dataset.IterableDataset`):
266
+ The iterable dataset to split into several shards.
267
+ batch_size (`int`, *optional*, defaults to 1):
268
+ The size of the batches per shard (if `split_batches=False`) or the size of the batches (if
269
+ `split_batches=True`).
270
+ drop_last (`bool`, *optional*, defaults to `False`):
271
+ Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
272
+ beginning.
273
+ num_processes (`int`, *optional*, defaults to 1):
274
+ The number of processes running concurrently.
275
+ process_index (`int`, *optional*, defaults to 0):
276
+ The index of the current process.
277
+ split_batches (`bool`, *optional*, defaults to `False`):
278
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
279
+ yielding different full batches on each process.
280
+
281
+ On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
282
+
283
+ - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
284
+ argument is set to `False`.
285
+ - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if
286
+ this argument is set to `True`.
287
+ """
288
+
289
+ def __init__(
290
+ self,
291
+ dataset: IterableDataset,
292
+ batch_size: int = 1,
293
+ drop_last: bool = False,
294
+ num_processes: int = 1,
295
+ process_index: int = 0,
296
+ split_batches: bool = False,
297
+ ):
298
+ if split_batches and batch_size > 1 and batch_size % num_processes != 0:
299
+ raise ValueError(
300
+ f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) "
301
+ f"needs to be a round multiple of the number of processes ({num_processes})."
302
+ )
303
+ self.dataset = dataset
304
+ self.batch_size = batch_size
305
+ self.drop_last = drop_last
306
+ self.num_processes = num_processes
307
+ self.process_index = process_index
308
+ self.split_batches = split_batches
309
+
310
+ def set_epoch(self, epoch):
311
+ self.epoch = epoch
312
+ if hasattr(self.dataset, "set_epoch"):
313
+ self.dataset.set_epoch(epoch)
314
+
315
+ def __len__(self):
316
+ # We will just raise the downstream error if the underlying dataset is not sized
317
+ if self.drop_last:
318
+ return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
319
+ else:
320
+ return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
321
+
322
+ def __iter__(self):
323
+ if (
324
+ not hasattr(self.dataset, "set_epoch")
325
+ and hasattr(self.dataset, "generator")
326
+ and isinstance(self.dataset.generator, torch.Generator)
327
+ ):
328
+ self.dataset.generator.manual_seed(self.epoch)
329
+ real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
330
+ process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
331
+ process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
332
+
333
+ first_batch = None
334
+ current_batch = []
335
+ for element in self.dataset:
336
+ current_batch.append(element)
337
+ # Wait to have a full batch before yielding elements.
338
+ if len(current_batch) == real_batch_size:
339
+ for i in process_slice:
340
+ yield current_batch[i]
341
+ if first_batch is None:
342
+ first_batch = current_batch.copy()
343
+ current_batch = []
344
+
345
+ # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
346
+ if not self.drop_last and len(current_batch) > 0:
347
+ if first_batch is None:
348
+ first_batch = current_batch.copy()
349
+ while len(current_batch) < real_batch_size:
350
+ current_batch += first_batch
351
+ for i in process_slice:
352
+ yield current_batch[i]
353
+
354
+
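Illustrative aside (not part of the diff): a small sketch of the sharding behaviour described in the docstring above; `RangeIterable` is a hypothetical toy dataset.

from torch.utils.data import IterableDataset
from accelerate.data_loader import IterableDatasetShard

class RangeIterable(IterableDataset):
    # Hypothetical toy dataset yielding 0..7.
    def __iter__(self):
        yield from range(8)

shard0 = IterableDatasetShard(RangeIterable(), batch_size=4, num_processes=2, process_index=0)
shard1 = IterableDatasetShard(RangeIterable(), batch_size=4, num_processes=2, process_index=1)
print(list(shard0))  # [0, 1, 2, 3]
print(list(shard1))  # [4, 5, 6, 7]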
355
+ class DataLoaderStateMixin:
356
+ """
357
+ Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the
358
+ end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other
359
+ useful information that might be needed.
360
+
361
+ **Available attributes:**
362
+
363
+ - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch
364
+ - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
365
+ batch size
366
+
367
+ """
368
+
369
+ def __init_subclass__(cls, **kwargs):
370
+ cls.end_of_dataloader = False
371
+ cls.remainder = -1
372
+
373
+ def reset(self):
374
+ self.end_of_dataloader = False
375
+ self.remainder = -1
376
+
377
+ def begin(self):
378
+ "Prepares the gradient state for the current dataloader"
379
+ self.reset()
380
+ with suppress(Exception):
381
+ if not self._drop_last:
382
+ length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
383
+ self.remainder = length % self.total_batch_size
384
+ self.gradient_state._add_dataloader(self)
385
+
386
+ def end(self):
387
+ "Cleans up the gradient state after exiting the dataloader"
388
+ self.gradient_state._remove_dataloader(self)
389
+
390
+
391
+ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
392
+ """
393
+ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
394
+
395
+ Args:
396
+ dataset (`torch.utils.data.dataset.Dataset`):
397
+ The dataset to use to build this dataloader.
398
+ device (`torch.device`, *optional*):
399
+ If passed, the device to put all batches on.
400
+ rng_types (list of `str` or [`~utils.RNGType`]):
401
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
402
+ several of:
403
+
404
+ - `"torch"`: the base torch random number generator
405
+ - `"cuda"`: the CUDA random number generator (GPU only)
406
+ - `"xla"`: the XLA random number generator (TPU only)
407
+ - `"generator"`: an optional `torch.Generator`
408
+ synchronized_generator (`torch.Generator`, *optional*):
409
+ A random number generator to keep synchronized across processes.
410
+ skip_batches (`int`, *optional*, defaults to 0):
411
+ The number of batches to skip at the beginning.
412
+ **kwargs (additional keyword arguments, *optional*):
413
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
414
+
415
+ **Available attributes:**
416
+
417
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
418
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
419
+ number of processes
420
+
421
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
422
+ """
423
+
424
+ def __init__(
425
+ self,
426
+ dataset,
427
+ device=None,
428
+ rng_types=None,
429
+ synchronized_generator=None,
430
+ skip_batches=0,
431
+ _drop_last: bool = False,
432
+ _non_blocking: bool = False,
433
+ **kwargs,
434
+ ):
435
+ super().__init__(dataset, **kwargs)
436
+ self.device = device
437
+ self.rng_types = rng_types
438
+ self.synchronized_generator = synchronized_generator
439
+ self.skip_batches = skip_batches
440
+ self.gradient_state = GradientState()
441
+ self._drop_last = _drop_last
442
+ self._non_blocking = _non_blocking
443
+ self.iteration = 0
444
+
445
+ def __iter__(self):
446
+ if self.rng_types is not None:
447
+ synchronize_rng_states(self.rng_types, self.synchronized_generator)
448
+ self.begin()
449
+
450
+ self.set_epoch(self.iteration)
451
+ dataloader_iter = super().__iter__()
452
+ # We iterate one batch ahead to check when we are at the end
453
+ try:
454
+ current_batch = next(dataloader_iter)
455
+ except StopIteration:
456
+ yield
457
+
458
+ batch_index = 0
459
+ while True:
460
+ try:
461
+ # But we still move it to the device so it is done before `StopIteration` is reached
462
+ if self.device is not None:
463
+ current_batch = send_to_device(current_batch, self.device, non_blocking=self._non_blocking)
464
+ next_batch = next(dataloader_iter)
465
+ if batch_index >= self.skip_batches:
466
+ yield current_batch
467
+ batch_index += 1
468
+ current_batch = next_batch
469
+ except StopIteration:
470
+ self.end_of_dataloader = True
471
+ if batch_index >= self.skip_batches:
472
+ yield current_batch
473
+ break
474
+
475
+ self.iteration += 1
476
+ self.end()
477
+
478
+ def set_epoch(self, epoch: int):
479
+ # In case it is manually passed in, the user can set it to what they like
480
+ if self.iteration != epoch:
481
+ self.iteration = epoch
482
+ if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
483
+ self.batch_sampler.sampler.set_epoch(epoch)
484
+ # We support if a custom `Dataset` implementation has `set_epoch`
485
+ # or in general HF datasets `Datasets`
486
+ elif hasattr(self.dataset, "set_epoch"):
487
+ self.dataset.set_epoch(epoch)
488
+
489
+ @property
490
+ def total_batch_size(self):
491
+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
492
+ return (
493
+ batch_sampler.batch_size
494
+ if getattr(batch_sampler, "split_batches", False)
495
+ else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
496
+ )
497
+
498
+ @property
499
+ def total_dataset_length(self):
500
+ if hasattr(self.dataset, "total_length"):
501
+ return self.dataset.total_length
502
+ else:
503
+ return len(self.dataset)
504
+
505
+ def get_sampler(self):
506
+ return get_sampler(self)
507
+
508
+ def set_sampler(self, sampler):
509
+ sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler)
510
+ if sampler_is_batch_sampler:
511
+ self.sampler.sampler = sampler
512
+ else:
513
+ self.batch_sampler.sampler = sampler
514
+ if hasattr(self.batch_sampler, "batch_sampler"):
515
+ self.batch_sampler.batch_sampler.sampler = sampler
516
+
517
+
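Illustrative aside: a sketch of using `DataLoaderShard` directly for device placement; in practice it is normally built for you by `prepare_data_loader` further down in this file, and the device choice here is just an example.

import torch
from torch.utils.data import TensorDataset
from accelerate.data_loader import DataLoaderShard

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = TensorDataset(torch.arange(10).float())
loader = DataLoaderShard(dataset, device=device, batch_size=4)

for batch in loader:
    # Batches arrive already on `device`; the one-batch lookahead in __iter__
    # means `end_of_dataloader` is already True while the final batch is yielded.
    print(batch[0].device, loader.end_of_dataloader)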
518
+ if is_torch_xla_available():
519
+ import torch_xla.distributed.parallel_loader as xpl
520
+
521
+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
522
+ """
523
+ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.
524
+
525
+ XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to
526
+ prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main
527
+ thread only.
528
+
529
+ **Available attributes:**
530
+
531
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
532
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
533
+ number of processes
534
+
535
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
536
+ """
537
+
538
+ def __init__(self, dataloader: DataLoaderShard, device: torch.device):
539
+ super().__init__(dataloader, device)
540
+ self._rng_types = self._loader.rng_types
541
+ self._loader.rng_types = None
542
+
543
+ def __iter__(self):
544
+ if self._rng_types is not None:
545
+ synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
546
+
547
+ return super().__iter__()
548
+
549
+ @property
550
+ def total_batch_size(self):
551
+ return self._loader.total_batch_size
552
+
553
+ @property
554
+ def total_dataset_length(self):
555
+ return self._loader.total_dataset_length
556
+
557
+ @property
558
+ def batch_sampler(self):
559
+ return self._loader.batch_sampler
560
+
561
+
562
+ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
563
+ """
564
+ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
565
+ process their part of the batch.
566
+
567
+ Args:
568
+ split_batches (`bool`, *optional*, defaults to `False`):
569
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
570
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
571
+ `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be
572
+ the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial
573
+ `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch
574
+ size of the `dataloader` is a round multiple of `num_processes`.
575
+ skip_batches (`int`, *optional*, defaults to 0):
576
+ The number of batches to skip at the beginning of an iteration.
577
+
578
+ **Available attributes:**
579
+
580
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
581
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
582
+ number of processes
583
+
584
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
585
+ """
586
+
587
+ def __init__(
588
+ self,
589
+ dataset,
590
+ split_batches: bool = False,
591
+ skip_batches=0,
592
+ _drop_last: bool = False,
593
+ _non_blocking: bool = False,
594
+ slice_fn=None,
595
+ **kwargs,
596
+ ):
597
+ shuffle = False
598
+ if is_torch_version(">=", "1.11.0"):
599
+ from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
600
+
601
+ # We need to save the shuffling state of the DataPipe
602
+ if isinstance(dataset, ShufflerIterDataPipe):
603
+ shuffle = dataset._shuffle_enabled
604
+ super().__init__(dataset, **kwargs)
605
+ self.split_batches = split_batches
606
+ if shuffle:
607
+ torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
608
+
609
+ self.gradient_state = GradientState()
610
+ self.state = AcceleratorState()
611
+ self._drop_last = _drop_last
612
+ self._non_blocking = _non_blocking
613
+ self.skip_batches = skip_batches
614
+
615
+ self.slice_fn = slice_tensors if slice_fn is None else slice_fn
616
+ self.iteration = 0
617
+
618
+ def _fetch_batches(self, iterator):
619
+ batches, batch = None, None
620
+ # On process 0, we gather the batch to dispatch.
621
+ if self.state.process_index == 0:
622
+ try:
623
+ if self.split_batches:
624
+ # One batch of the main iterator is dispatched and split.
625
+ batch = next(iterator)
626
+ else:
627
+ # num_processes batches of the main iterator are concatenated then dispatched and split.
628
+ # We add the batches one by one so we have the remainder available when drop_last=False.
629
+ batches = []
630
+ for _ in range(self.state.num_processes):
631
+ batches.append(next(iterator))
632
+ try:
633
+ batch = concatenate(batches, dim=0)
634
+ except RuntimeError as e:
635
+ raise RuntimeError(
636
+ "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`."
637
+ "either pass `dispatch_batches=False` and have each process fetch its own batch "
638
+ " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and "
639
+ "slice it into `num_processes` batches for each process."
640
+ ) from e
641
+ # In both cases, we need to get the structure of the batch that we will broadcast on other
642
+ # processes to initialize the tensors with the right shape.
643
+ # data_structure, stop_iteration
644
+ batch_info = [get_data_structure(batch), False]
645
+ except StopIteration:
646
+ batch_info = [None, True]
647
+ else:
648
+ batch_info = [None, self._stop_iteration]
649
+ # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.
650
+ broadcast_object_list(batch_info)
651
+ self._stop_iteration = batch_info[1]
652
+ if self._stop_iteration:
653
+ # If drop_last is False and split_batches is False, we may have a remainder to take care of.
654
+ if not self.split_batches and not self._drop_last:
655
+ if self.state.process_index == 0 and len(batches) > 0:
656
+ batch = concatenate(batches, dim=0)
657
+ batch_info = [get_data_structure(batch), False]
658
+ else:
659
+ batch_info = [None, True]
660
+ broadcast_object_list(batch_info)
661
+ return batch, batch_info
662
+
663
+ def __iter__(self):
664
+ self.begin()
665
+ self.set_epoch(self.iteration)
666
+ main_iterator = None
667
+ if is_torch_version(">=", "2.0.1"):
668
+ # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts
669
+ # shared seed to all dist processes. Thus, we need to create iterator for all dist processes.
670
+ # But, we only iterate through the DataLoader on process 0.
671
+ main_iterator = super().__iter__()
672
+ elif self.state.process_index == 0:
673
+ main_iterator = super().__iter__()
674
+ stop_iteration = False
675
+ self._stop_iteration = False
676
+ first_batch = None
677
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
678
+ batch_index = 0
679
+ while not stop_iteration:
680
+ batch, batch_info = next_batch, next_batch_info
681
+
682
+ if self.state.process_index != 0:
683
+ # Initialize tensors on other processes than process 0.
684
+ batch = initialize_tensors(batch_info[0])
685
+ batch = send_to_device(batch, self.state.device, non_blocking=self._non_blocking)
686
+ # Broadcast the batch before splitting it.
687
+ batch = broadcast(batch, from_process=0)
688
+
689
+ if not self._drop_last and first_batch is None:
690
+ # We keep at least num processes elements of the first batch to be able to complete the last batch
691
+ first_batch = self.slice_fn(
692
+ batch,
693
+ slice(0, self.state.num_processes),
694
+ process_index=self.state.process_index,
695
+ num_processes=self.state.num_processes,
696
+ )
697
+
698
+ if batch is None:
699
+ raise ValueError(
700
+ f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration."
701
+ )
702
+
703
+ observed_batch_size = find_batch_size(batch)
704
+ batch_size = observed_batch_size // self.state.num_processes
705
+
706
+ stop_iteration = self._stop_iteration
707
+ if not stop_iteration:
708
+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in
709
+ # the dataloader since the number of batches is a round multiple of the number of processes.
710
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
711
+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.
712
+ if self._stop_iteration and next_batch_info[0] is None:
713
+ stop_iteration = True
714
+
715
+ if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:
716
+ # If the last batch is not complete, let's add the first batch to it.
717
+ batch = concatenate([batch, first_batch], dim=0)
718
+ # Batch size computation above is wrong, it's off by 1 so we fix it.
719
+ batch_size += 1
720
+
721
+ data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)
722
+ batch = self.slice_fn(
723
+ batch,
724
+ data_slice,
725
+ process_index=self.state.process_index,
726
+ num_processes=self.state.num_processes,
727
+ )
728
+
729
+ if stop_iteration:
730
+ self.end_of_dataloader = True
731
+ self.remainder = observed_batch_size
732
+ if batch_index >= self.skip_batches:
733
+ yield batch
734
+ batch_index += 1
735
+ self.iteration += 1
736
+ self.end()
737
+
738
+ def set_epoch(self, epoch: int):
739
+ # In case it is manually passed in, the user can set it to what they like
740
+ if self.iteration != epoch:
741
+ self.iteration = epoch
742
+ if hasattr(self.batch_sampler.sampler, "set_epoch"):
743
+ self.batch_sampler.sampler.set_epoch(epoch)
744
+ elif hasattr(self.dataset, "set_epoch"):
745
+ self.dataset.set_epoch(epoch)
746
+
747
+ def __len__(self):
748
+ whole_length = super().__len__()
749
+ if self.split_batches:
750
+ return whole_length
751
+ elif self._drop_last:
752
+ return whole_length // self.state.num_processes
753
+ else:
754
+ return math.ceil(whole_length / self.state.num_processes)
755
+
756
+ @property
757
+ def total_batch_size(self):
758
+ return (
759
+ self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)
760
+ )
761
+
762
+ @property
763
+ def total_dataset_length(self):
764
+ return len(self.dataset)
765
+
766
+ def get_sampler(self):
767
+ return get_sampler(self)
768
+
769
+ def set_sampler(self, sampler):
770
+ sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler)
771
+ if sampler_is_batch_sampler:
772
+ self.sampler.sampler = sampler
773
+ else:
774
+ self.batch_sampler.sampler = sampler
775
+ if hasattr(self.batch_sampler, "batch_sampler"):
776
+ self.batch_sampler.batch_sampler.sampler = sampler
777
+
778
+
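Illustrative aside: the per-process slice computed near the end of `DataLoaderDispatcher.__iter__` above, reproduced standalone with `slice_tensors` (a conceptual sketch; the tensor and process values are made up).

import torch
from accelerate.utils import slice_tensors

full_batch = torch.arange(8)          # what process 0 fetched and broadcast
num_processes, process_index = 2, 1   # hypothetical values
batch_size = full_batch.shape[0] // num_processes
data_slice = slice(process_index * batch_size, (process_index + 1) * batch_size)
print(slice_tensors(full_batch, data_slice))  # tensor([4, 5, 6, 7])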
779
+ def get_sampler(dataloader):
780
+ """
781
+ Get the sampler associated with the dataloader
782
+
783
+ Args:
784
+ dataloader (`torch.utils.data.dataloader.DataLoader`):
785
+ The data loader to split across several devices.
786
+ Returns:
787
+ `torch.utils.data.Sampler`: The sampler associated with the dataloader
788
+ """
789
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
790
+ if sampler_is_batch_sampler:
791
+ sampler = getattr(dataloader.sampler, "sampler", None)
792
+ else:
793
+ sampler = getattr(dataloader.batch_sampler, "sampler", None)
794
+ return sampler
795
+
796
+
797
+ def prepare_data_loader(
798
+ dataloader: DataLoader,
799
+ device: Optional[torch.device] = None,
800
+ num_processes: Optional[int] = None,
801
+ process_index: Optional[int] = None,
802
+ split_batches: bool = False,
803
+ put_on_device: bool = False,
804
+ rng_types: Optional[List[Union[str, RNGType]]] = None,
805
+ dispatch_batches: Optional[bool] = None,
806
+ even_batches: bool = True,
807
+ slice_fn_for_dispatch: Optional[Callable] = None,
808
+ use_seedable_sampler: bool = False,
809
+ non_blocking: bool = False,
810
+ ) -> DataLoader:
811
+ """
812
+ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
813
+
814
+ Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
815
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
816
+
817
+ Args:
818
+ dataloader (`torch.utils.data.dataloader.DataLoader`):
819
+ The data loader to split across several devices.
820
+ device (`torch.device`):
821
+ The target device for the returned `DataLoader`.
822
+ num_processes (`int`, *optional*):
823
+ The number of processes running concurrently. Will default to the value given by
824
+ [`~state.AcceleratorState`].
825
+ process_index (`int`, *optional*):
826
+ The index of the current process. Will default to the value given by [`~state.AcceleratorState`].
827
+ split_batches (`bool`, *optional*, defaults to `False`):
828
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
829
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
830
+ `num_processes` batches at each iteration).
831
+
832
+ Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
833
+ this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
834
+ otherwise.
835
+
836
+ Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
837
+ `num_processes`.
838
+ put_on_device (`bool`, *optional*, defaults to `False`):
839
+ Whether or not to put the batches on `device` (only works if the batches are nested lists, tuples, or
840
+ dictionaries of tensors).
841
+ rng_types (list of `str` or [`~utils.RNGType`]):
842
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
843
+ several of:
844
+
845
+ - `"torch"`: the base torch random number generator
846
+ - `"cuda"`: the CUDA random number generator (GPU only)
847
+ - `"xla"`: the XLA random number generator (TPU only)
848
+ - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
849
+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
850
+
851
+ dispatch_batches (`bool`, *optional*):
852
+ If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches
853
+ are split and broadcast to each process. Will default to `True` when the underlying dataset is an
854
+ `IterableDataset`, `False` otherwise.
855
+ even_batches (`bool`, *optional*, defaults to `True`):
856
+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the
857
+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
858
+ all workers.
859
+ slice_fn_for_dispatch (`Callable`, *optional*):
860
+ If passed, this function will be used to slice tensors across `num_processes`. Will default to
861
+ [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be
862
+ ignored otherwise.
863
+ use_seedable_sampler (`bool`, *optional*, defaults to `False`):
864
+ Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better
865
+ reproducibility. Comes at a cost of potentially different performance due to different shuffling
866
+ algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
867
+ `self.set_epoch`
868
+ non_blocking (`bool`, *optional*, defaults to `False`):
869
+ If set to `True`, dataloader will utilize non-blocking host-to-device transfers. If the dataloader has
870
+ `pin_memory` set to `True`, this will help to increase overlap between data transfer and computations.
871
+
872
+
873
+ Returns:
874
+ `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches for the current process
875
+
876
+ <Tip warning={true}>
877
+
878
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
879
+ equal to `False`
880
+
881
+ </Tip>
882
+ """
883
+ if dispatch_batches is None:
884
+ if not put_on_device:
885
+ dispatch_batches = False
886
+ else:
887
+ dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
888
+
889
+ if dispatch_batches and not put_on_device:
890
+ raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
891
+ # Grab defaults from AcceleratorState
892
+ state = AcceleratorState()
893
+ if num_processes is None:
894
+ num_processes = state.num_processes
895
+ if process_index is None:
896
+ process_index = state.process_index
897
+
898
+ # Sanity check
899
+ if split_batches:
900
+ if dataloader.batch_size is not None:
901
+ batch_size_for_check = dataloader.batch_size
902
+ else:
903
+ # For custom batch_sampler
904
+ if hasattr(dataloader.batch_sampler, "batch_size"):
905
+ batch_size_for_check = dataloader.batch_sampler.batch_size
906
+ else:
907
+ raise ValueError(
908
+ "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed "
909
+ "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. "
910
+ "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` "
911
+ f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set."
912
+ )
913
+
914
+ if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:
915
+ raise ValueError(
916
+ f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
917
+ f"needs to be a round multiple of the number of processes ({num_processes})."
918
+ )
919
+
920
+ new_dataset = dataloader.dataset
921
+ # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
922
+ new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
923
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
924
+ synchronized_generator = None
925
+
926
+ sampler = get_sampler(dataloader)
927
+ if isinstance(sampler, RandomSampler) and use_seedable_sampler:
928
+ # When iterating through the dataloader during distributed processes
929
+ # we want to ensure that on each process we are iterating through the same
930
+ # samples in the same order if a seed is set. This requires a tweak
931
+ # to the `torch.utils.data.RandomSampler` class (if used).
932
+ sampler = SeedableRandomSampler(
933
+ data_source=sampler.data_source,
934
+ replacement=sampler.replacement,
935
+ num_samples=sampler._num_samples,
936
+ generator=getattr(sampler, "generator", torch.Generator()),
937
+ )
938
+
939
+ if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
940
+ # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
941
+ generator = torch.Generator().manual_seed(42)
942
+ dataloader.generator = generator
943
+ dataloader.sampler.generator = generator
944
+ # No change if no multiprocess
945
+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
946
+ if isinstance(new_dataset, IterableDataset):
947
+ if getattr(dataloader.dataset, "generator", None) is not None:
948
+ synchronized_generator = dataloader.dataset.generator
949
+ new_dataset = IterableDatasetShard(
950
+ new_dataset,
951
+ batch_size=dataloader.batch_size,
952
+ drop_last=dataloader.drop_last,
953
+ num_processes=num_processes,
954
+ process_index=process_index,
955
+ split_batches=split_batches,
956
+ )
957
+ else:
958
+ if not use_seedable_sampler and hasattr(sampler, "generator"):
959
+ if sampler.generator is None:
960
+ sampler.generator = torch.Generator()
961
+ synchronized_generator = sampler.generator
962
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
963
+ new_batch_sampler = BatchSamplerShard(
964
+ batch_sampler,
965
+ num_processes=num_processes,
966
+ process_index=process_index,
967
+ split_batches=split_batches,
968
+ even_batches=even_batches,
969
+ )
970
+
971
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
972
+ ignore_kwargs = [
973
+ "batch_size",
974
+ "shuffle",
975
+ "sampler",
976
+ "batch_sampler",
977
+ "drop_last",
978
+ ]
979
+
980
+ if rng_types is not None and synchronized_generator is None and "generator" in rng_types:
981
+ rng_types.remove("generator")
982
+
983
+ kwargs = {
984
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
985
+ for k in _PYTORCH_DATALOADER_KWARGS
986
+ if k not in ignore_kwargs
987
+ }
988
+
989
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
990
+ if new_batch_sampler is None:
991
+ kwargs["drop_last"] = dataloader.drop_last
992
+ kwargs["batch_size"] = (
993
+ dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
994
+ )
995
+ if dispatch_batches:
996
+ kwargs.pop("generator")
997
+ dataloader = DataLoaderDispatcher(
998
+ new_dataset,
999
+ split_batches=split_batches,
1000
+ batch_sampler=new_batch_sampler,
1001
+ _drop_last=dataloader.drop_last,
1002
+ _non_blocking=non_blocking,
1003
+ slice_fn=slice_fn_for_dispatch,
1004
+ **kwargs,
1005
+ )
1006
+ elif sampler_is_batch_sampler:
1007
+ dataloader = DataLoaderShard(
1008
+ new_dataset,
1009
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
1010
+ sampler=new_batch_sampler,
1011
+ batch_size=dataloader.batch_size,
1012
+ rng_types=rng_types,
1013
+ _drop_last=dataloader.drop_last,
1014
+ _non_blocking=non_blocking,
1015
+ synchronized_generator=synchronized_generator,
1016
+ **kwargs,
1017
+ )
1018
+ else:
1019
+ dataloader = DataLoaderShard(
1020
+ new_dataset,
1021
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
1022
+ batch_sampler=new_batch_sampler,
1023
+ rng_types=rng_types,
1024
+ synchronized_generator=synchronized_generator,
1025
+ _drop_last=dataloader.drop_last,
1026
+ _non_blocking=non_blocking,
1027
+ **kwargs,
1028
+ )
1029
+
1030
+ if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
1031
+ dataloader.set_sampler(sampler)
1032
+ if state.distributed_type == DistributedType.XLA:
1033
+ return MpDeviceLoaderWrapper(dataloader, device)
1034
+ return dataloader
1035
+
1036
+
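Illustrative aside: `prepare_data_loader` is what `Accelerator.prepare` ultimately calls for dataloaders; a minimal single-process sketch, assuming a CPU-only run.

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate.data_loader import prepare_data_loader

dataset = TensorDataset(torch.randn(16, 3), torch.randint(0, 2, (16,)))
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

# num_processes / process_index default to the values held by AcceleratorState.
prepared = prepare_data_loader(dataloader, device=torch.device("cpu"), put_on_device=True)
for x, y in prepared:
    print(x.shape, y.shape)  # torch.Size([4, 3]) torch.Size([4])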
1037
+ class SkipBatchSampler(BatchSampler):
1038
+ """
1039
+ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
1040
+ """
1041
+
1042
+ def __init__(self, batch_sampler, skip_batches=0):
1043
+ self.batch_sampler = batch_sampler
1044
+ self.skip_batches = skip_batches
1045
+
1046
+ def __iter__(self):
1047
+ for index, samples in enumerate(self.batch_sampler):
1048
+ if index >= self.skip_batches:
1049
+ yield samples
1050
+
1051
+ @property
1052
+ def total_length(self):
1053
+ return len(self.batch_sampler)
1054
+
1055
+ def __len__(self):
1056
+ return len(self.batch_sampler) - self.skip_batches
1057
+
1058
+
1059
+ class SkipDataLoader(DataLoader):
1060
+ """
1061
+ Subclass of a PyTorch `DataLoader` that will skip the first batches.
1062
+
1063
+ Args:
1064
+ dataset (`torch.utils.data.dataset.Dataset`):
1065
+ The dataset to use to build this dataloader.
1066
+ skip_batches (`int`, *optional*, defaults to 0):
1067
+ The number of batches to skip at the beginning.
1068
+ kwargs:
1069
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
1070
+ """
1071
+
1072
+ def __init__(self, dataset, skip_batches=0, **kwargs):
1073
+ super().__init__(dataset, **kwargs)
1074
+ self.skip_batches = skip_batches
1075
+
1076
+ def __iter__(self):
1077
+ for index, batch in enumerate(super().__iter__()):
1078
+ if index >= self.skip_batches:
1079
+ yield batch
1080
+
1081
+
1082
+ def skip_first_batches(dataloader, num_batches=0):
1083
+ """
1084
+ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
1085
+ """
1086
+ dataset = dataloader.dataset
1087
+ sampler_is_batch_sampler = False
1088
+ if isinstance(dataset, IterableDataset):
1089
+ new_batch_sampler = None
1090
+ else:
1091
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
1092
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
1093
+ new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
1094
+
1095
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
1096
+ ignore_kwargs = [
1097
+ "batch_size",
1098
+ "shuffle",
1099
+ "sampler",
1100
+ "batch_sampler",
1101
+ "drop_last",
1102
+ ]
1103
+
1104
+ kwargs = {
1105
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
1106
+ for k in _PYTORCH_DATALOADER_KWARGS
1107
+ if k not in ignore_kwargs
1108
+ }
1109
+
1110
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
1111
+ if new_batch_sampler is None:
1112
+ kwargs["drop_last"] = dataloader.drop_last
1113
+ kwargs["batch_size"] = dataloader.batch_size
1114
+
1115
+ if isinstance(dataloader, DataLoaderDispatcher):
1116
+ if new_batch_sampler is None:
1117
+ # Need to manually skip batches in the dataloader
1118
+ kwargs["skip_batches"] = num_batches
1119
+ dataloader = DataLoaderDispatcher(
1120
+ dataset,
1121
+ split_batches=dataloader.split_batches,
1122
+ batch_sampler=new_batch_sampler,
1123
+ _drop_last=dataloader._drop_last,
1124
+ **kwargs,
1125
+ )
1126
+ elif isinstance(dataloader, DataLoaderShard):
1127
+ if new_batch_sampler is None:
1128
+ # Need to manually skip batches in the dataloader
1129
+ kwargs["skip_batches"] = num_batches
1130
+ elif sampler_is_batch_sampler:
1131
+ kwargs["sampler"] = new_batch_sampler
1132
+ kwargs["batch_size"] = dataloader.batch_size
1133
+ else:
1134
+ kwargs["batch_sampler"] = new_batch_sampler
1135
+ dataloader = DataLoaderShard(
1136
+ dataset,
1137
+ device=dataloader.device,
1138
+ rng_types=dataloader.rng_types,
1139
+ synchronized_generator=dataloader.synchronized_generator,
1140
+ **kwargs,
1141
+ )
1142
+ else:
1143
+ if new_batch_sampler is None:
1144
+ # Need to manually skip batches in the dataloader
1145
+ dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
1146
+ else:
1147
+ dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
1148
+
1149
+ return dataloader
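Illustrative aside: a small sketch of `skip_first_batches` above, the helper used when resuming training part-way through an epoch; the toy dataset and batch counts are arbitrary.

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate.data_loader import skip_first_batches

dataset = TensorDataset(torch.arange(12).float())
dataloader = DataLoader(dataset, batch_size=4)

resumed = skip_first_batches(dataloader, num_batches=2)
print(len(resumed))         # 1 -- only the last of the 3 original batches remains
print(next(iter(resumed)))  # [tensor([ 8.,  9., 10., 11.])]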
llmeval-env/lib/python3.10/site-packages/accelerate/hooks.py ADDED
@@ -0,0 +1,709 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ from typing import Dict, List, Mapping, Optional, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from .state import PartialState
22
+ from .utils import (
23
+ PrefixedDataset,
24
+ find_device,
25
+ named_module_tensors,
26
+ send_to_device,
27
+ set_module_tensor_to_device,
28
+ )
29
+ from .utils.modeling import get_non_persistent_buffers
30
+ from .utils.other import recursive_getattr
31
+
32
+
33
+ class ModelHook:
34
+ """
35
+ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
36
+ from existing PyTorch hooks is that they also get passed the kwargs.
37
+
38
+ Class attribute:
39
+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
40
+ the `torch.no_grad()` context manager.
41
+ """
42
+
43
+ no_grad = False
44
+
45
+ def init_hook(self, module):
46
+ """
47
+ To be executed when the hook is attached to the module.
48
+
49
+ Args:
50
+ module (`torch.nn.Module`): The module attached to this hook.
51
+ """
52
+ return module
53
+
54
+ def pre_forward(self, module, *args, **kwargs):
55
+ """
56
+ To be executed just before the forward method of the model.
57
+
58
+ Args:
59
+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
60
+ args (`Tuple[Any]`): The positional arguments passed to the module.
61
+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
62
+
63
+ Returns:
64
+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
65
+ """
66
+ return args, kwargs
67
+
68
+ def post_forward(self, module, output):
69
+ """
70
+ To be executed just after the forward method of the model.
71
+
72
+ Args:
73
+ module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.
74
+ output (`Any`): The output of the module.
75
+
76
+ Returns:
77
+ `Any`: The processed `output`.
78
+ """
79
+ return output
80
+
81
+ def detach_hook(self, module):
82
+ """
83
+ To be executed when the hook is detached from a module.
84
+
85
+ Args:
86
+ module (`torch.nn.Module`): The module detached from this hook.
87
+ """
88
+ return module
89
+
90
+
91
+ class SequentialHook(ModelHook):
92
+ """
93
+ A hook that can contain several hooks and iterates through them at each event.
94
+ """
95
+
96
+ def __init__(self, *hooks):
97
+ self.hooks = hooks
98
+
99
+ def init_hook(self, module):
100
+ for hook in self.hooks:
101
+ module = hook.init_hook(module)
102
+ return module
103
+
104
+ def pre_forward(self, module, *args, **kwargs):
105
+ for hook in self.hooks:
106
+ args, kwargs = hook.pre_forward(module, *args, **kwargs)
107
+ return args, kwargs
108
+
109
+ def post_forward(self, module, output):
110
+ for hook in self.hooks:
111
+ output = hook.post_forward(module, output)
112
+ return output
113
+
114
+ def detach_hook(self, module):
115
+ for hook in self.hooks:
116
+ module = hook.detach_hook(module)
117
+ return module
118
+
119
+
120
+ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
121
+ """
122
+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook; to remove
123
+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.
124
+
125
+ <Tip warning={true}>
126
+
127
+ If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
128
+ together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
129
+
130
+ </Tip>
131
+
132
+ Args:
133
+ module (`torch.nn.Module`):
134
+ The module to attach a hook to.
135
+ hook (`ModelHook`):
136
+ The hook to attach.
137
+ append (`bool`, *optional*, defaults to `False`):
138
+ Whether the hook should be chained with an existing one (if module already contains a hook) or not.
139
+
140
+ Returns:
141
+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
142
+ be discarded).
143
+ """
144
+
145
+ if append and (getattr(module, "_hf_hook", None) is not None):
146
+ old_hook = module._hf_hook
147
+ remove_hook_from_module(module)
148
+ hook = SequentialHook(old_hook, hook)
149
+
150
+ if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
151
+ # If we already put some hook on this module, we replace it with the new one.
152
+ old_forward = module._old_forward
153
+ else:
154
+ old_forward = module.forward
155
+ module._old_forward = old_forward
156
+
157
+ module = hook.init_hook(module)
158
+ module._hf_hook = hook
159
+
160
+ def new_forward(module, *args, **kwargs):
161
+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
162
+ if module._hf_hook.no_grad:
163
+ with torch.no_grad():
164
+ output = module._old_forward(*args, **kwargs)
165
+ else:
166
+ output = module._old_forward(*args, **kwargs)
167
+ return module._hf_hook.post_forward(module, output)
168
+
169
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
170
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
171
+ if "GraphModuleImpl" in str(type(module)):
172
+ module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
173
+ else:
174
+ module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
175
+
176
+ return module
177
+
178
+
179
+ def remove_hook_from_module(module: nn.Module, recurse=False):
180
+ """
181
+ Removes any hook attached to a module via `add_hook_to_module`.
182
+
183
+ Args:
184
+ module (`torch.nn.Module`): The module to remove the hook from.
185
+ recurse (`bool`, *optional*, defaults to `False`): Whether to remove the hooks recursively.
186
+
187
+ Returns:
188
+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
189
+ be discarded).
190
+ """
191
+
192
+ if hasattr(module, "_hf_hook"):
193
+ module._hf_hook.detach_hook(module)
194
+ delattr(module, "_hf_hook")
195
+
196
+ if hasattr(module, "_old_forward"):
197
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
198
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
199
+ if "GraphModuleImpl" in str(type(module)):
200
+ module.__class__.forward = module._old_forward
201
+ else:
202
+ module.forward = module._old_forward
203
+ delattr(module, "_old_forward")
204
+
205
+ if recurse:
206
+ for child in module.children():
207
+ remove_hook_from_module(child, recurse)
208
+
209
+ return module
210
+
211
+
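Illustrative aside: a minimal custom hook attached with `add_hook_to_module`, just to show the pre/post callbacks wrapping `forward`; `PrintShapeHook` is a hypothetical example, not part of the library.

import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class PrintShapeHook(ModelHook):
    # Hypothetical hook that logs input/output shapes around forward.
    def pre_forward(self, module, *args, **kwargs):
        print("in:", args[0].shape)
        return args, kwargs

    def post_forward(self, module, output):
        print("out:", output.shape)
        return output

layer = nn.Linear(4, 2)
add_hook_to_module(layer, PrintShapeHook())
layer(torch.randn(3, 4))        # prints in: torch.Size([3, 4]) / out: torch.Size([3, 2])
remove_hook_from_module(layer)  # restores the original forward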
212
+ class AlignDevicesHook(ModelHook):
213
+ """
214
+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
215
+ associated module, potentially offloading the weights after the forward pass.
216
+
217
+ Args:
218
+ execution_device (`torch.device`, *optional*):
219
+ The device on which inputs and model weights should be placed before the forward pass.
220
+ offload (`bool`, *optional*, defaults to `False`):
221
+ Whether or not the weights should be offloaded after the forward pass.
222
+ io_same_device (`bool`, *optional*, defaults to `False`):
223
+ Whether or not the output should be placed on the same device as the input was.
224
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
225
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
226
+ offload_buffers (`bool`, *optional*, defaults to `False`):
227
+ Whether or not to include the associated module's buffers when offloading.
228
+ place_submodules (`bool`, *optional*, defaults to `False`):
229
+ Whether to place the submodules on `execution_device` during the `init_hook` event.
230
+ """
231
+
232
+ def __init__(
233
+ self,
234
+ execution_device: Optional[Union[int, str, torch.device]] = None,
235
+ offload: bool = False,
236
+ io_same_device: bool = False,
237
+ weights_map: Optional[Mapping] = None,
238
+ offload_buffers: bool = False,
239
+ place_submodules: bool = False,
240
+ skip_keys: Optional[Union[str, List[str]]] = None,
241
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
242
+ ):
243
+ self.execution_device = execution_device
244
+ self.offload = offload
245
+ self.io_same_device = io_same_device
246
+ self.weights_map = weights_map
247
+ self.offload_buffers = offload_buffers
248
+ self.place_submodules = place_submodules
249
+ self.skip_keys = skip_keys
250
+
251
+ # Will contain the input device when `io_same_device=True`.
252
+ self.input_device = None
253
+ self.param_original_devices = {}
254
+ self.buffer_original_devices = {}
255
+ self.tied_params_names = set()
256
+
257
+ # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory
258
+ # for tied weights already loaded on the target execution device.
259
+ self.tied_params_map = tied_params_map
260
+
261
+ def __repr__(self):
262
+ return (
263
+ f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
264
+ f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
265
+ f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
266
+ )
267
+
268
+ def init_hook(self, module):
269
+ # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero.
270
+ if self.execution_device == "meta" or self.execution_device == torch.device("meta"):
271
+ self.tied_params_map = None
272
+
273
+ if not self.offload and self.execution_device is not None:
274
+ for name, _ in named_module_tensors(module, recurse=self.place_submodules):
275
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
276
+ elif self.offload:
277
+ self.original_devices = {
278
+ name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)
279
+ }
280
+ if self.weights_map is None:
281
+ self.weights_map = {
282
+ name: param.to("cpu")
283
+ for name, param in named_module_tensors(
284
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules
285
+ )
286
+ }
287
+ for name, _ in named_module_tensors(
288
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True
289
+ ):
290
+ # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer,
291
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
292
+ # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str]
293
+ # to add on the fly pointers to `tied_params_map` in the pre_forward call.
294
+ if (
295
+ self.tied_params_map is not None
296
+ and recursive_getattr(module, name).data_ptr() in self.tied_params_map
297
+ ):
298
+ self.tied_params_names.add(name)
299
+
300
+ set_module_tensor_to_device(module, name, "meta")
301
+
302
+ if not self.offload_buffers and self.execution_device is not None:
303
+ for name, _ in module.named_buffers(recurse=self.place_submodules):
304
+ set_module_tensor_to_device(
305
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
306
+ )
307
+ elif self.offload_buffers and self.execution_device is not None:
308
+ for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
309
+ set_module_tensor_to_device(
310
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
311
+ )
312
+
313
+ return module
314
+
315
+ def pre_forward(self, module, *args, **kwargs):
316
+ if self.io_same_device:
317
+ self.input_device = find_device([args, kwargs])
318
+ if self.offload:
319
+ self.tied_pointers_to_remove = set()
320
+
321
+ for name, _ in named_module_tensors(
322
+ module,
323
+ include_buffers=self.offload_buffers,
324
+ recurse=self.place_submodules,
325
+ remove_non_persistent=True,
326
+ ):
327
+ fp16_statistics = None
328
+ value = self.weights_map[name]
329
+ if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
330
+ if value.dtype == torch.int8:
331
+ fp16_statistics = self.weights_map[name.replace("weight", "SCB")]
332
+
333
+ # In case we are using offloading with tied weights, we need to keep track of the offloaded weights
334
+ # that are loaded on device at this point, as we will need to remove them as well from the dictionary
335
+ # self.tied_params_map in order to allow to free memory.
336
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map:
337
+ self.tied_params_map[value.data_ptr()] = {}
338
+
339
+ if (
340
+ value is not None
341
+ and self.tied_params_map is not None
342
+ and value.data_ptr() in self.tied_params_map
343
+ and self.execution_device not in self.tied_params_map[value.data_ptr()]
344
+ ):
345
+ self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device))
346
+
347
+ set_module_tensor_to_device(
348
+ module,
349
+ name,
350
+ self.execution_device,
351
+ value=value,
352
+ fp16_statistics=fp16_statistics,
353
+ tied_params_map=self.tied_params_map,
354
+ )
355
+
356
+ return send_to_device(args, self.execution_device), send_to_device(
357
+ kwargs, self.execution_device, skip_keys=self.skip_keys
358
+ )
359
+
360
+ def post_forward(self, module, output):
361
+ if self.offload:
362
+ for name, _ in named_module_tensors(
363
+ module,
364
+ include_buffers=self.offload_buffers,
365
+ recurse=self.place_submodules,
366
+ remove_non_persistent=True,
367
+ ):
368
+ set_module_tensor_to_device(module, name, "meta")
369
+ if type(module).__name__ == "Linear8bitLt":
370
+ module.state.SCB = None
371
+ module.state.CxB = None
372
+
373
+ # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from
374
+ # this dictionary to allow the garbage collector to do its job.
375
+ for value_pointer, device in self.tied_pointers_to_remove:
376
+ del self.tied_params_map[value_pointer][device]
377
+ self.tied_pointers_to_remove = set()
378
+
379
+ if self.io_same_device and self.input_device is not None:
380
+ output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
381
+
382
+ return output
383
+
384
+ def detach_hook(self, module):
385
+ if self.offload:
386
+ for name, device in self.original_devices.items():
387
+ if device != torch.device("meta"):
388
+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
389
+ return module
390
+
391
+
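Illustrative aside: attaching an `AlignDevicesHook` by hand; normally the helpers below (and `dispatch_model` in `big_modeling.py`) do this for you, so treat this as a sketch with an arbitrary device choice.

import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
layer = nn.Linear(4, 2)

# Weights are moved to `device` in init_hook; io_same_device sends the output
# back to wherever the input came from.
add_hook_to_module(layer, AlignDevicesHook(execution_device=device, io_same_device=True))

out = layer(torch.randn(3, 4))  # CPU input is moved to `device` for the forward
print(out.device)               # cpu -- mirrored back to the input's device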
392
+ def attach_execution_device_hook(
393
+ module: torch.nn.Module,
394
+ execution_device: Union[int, str, torch.device],
395
+ skip_keys: Optional[Union[str, List[str]]] = None,
396
+ preload_module_classes: Optional[List[str]] = None,
397
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
398
+ ):
399
+ """
400
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
401
+ execution device.
402
+
403
+ Args:
404
+ module (`torch.nn.Module`):
405
+ The module where we want to attach the hooks.
406
+ execution_device (`int`, `str` or `torch.device`):
407
+ The device on which inputs and model weights should be placed before the forward pass.
408
+ skip_keys (`str` or `List[str]`, *optional*):
409
+ A list of keys to ignore when moving inputs or outputs between devices.
410
+ preload_module_classes (`List[str]`, *optional*):
411
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
412
+ of the forward. This should only be used for classes that have submodules which are registered but not
413
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
414
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
415
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
416
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
417
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
418
+ instead of duplicating memory.
419
+ """
420
+ if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
421
+ add_hook_to_module(
422
+ module,
423
+ AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map),
424
+ )
425
+
426
+ # Break the recursion if we get to a preload module.
427
+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
428
+ return
429
+
430
+ for child in module.children():
431
+ attach_execution_device_hook(child, execution_device, tied_params_map=tied_params_map)
432
+
433
+
434
+ def attach_align_device_hook(
435
+ module: torch.nn.Module,
436
+ execution_device: Optional[torch.device] = None,
437
+ offload: bool = False,
438
+ weights_map: Optional[Mapping] = None,
439
+ offload_buffers: bool = False,
440
+ module_name: str = "",
441
+ skip_keys: Optional[Union[str, List[str]]] = None,
442
+ preload_module_classes: Optional[List[str]] = None,
443
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
444
+ ):
445
+ """
446
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
447
+ buffers.
448
+
449
+ Args:
450
+ module (`torch.nn.Module`):
451
+ The module where we want to attach the hooks.
452
+ execution_device (`torch.device`, *optional*):
453
+ The device on which inputs and model weights should be placed before the forward pass.
454
+ offload (`bool`, *optional*, defaults to `False`):
455
+ Whether or not the weights should be offloaded after the forward pass.
456
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
457
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
458
+ offload_buffers (`bool`, *optional*, defaults to `False`):
459
+ Whether or not to include the associated module's buffers when offloading.
460
+ module_name (`str`, *optional*, defaults to `""`):
461
+ The name of the module.
462
+ skip_keys (`str` or `List[str]`, *optional*):
463
+ A list of keys to ignore when moving inputs or outputs between devices.
464
+ preload_module_classes (`List[str]`, *optional*):
465
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
466
+ of the forward. This should only be used for classes that have submodules which are registered but not
467
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
468
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
469
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
470
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
471
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
472
+ instead of duplicating memory.
473
+ """
474
+ # Attach the hook on this module if it has any direct tensor.
475
+ directs = named_module_tensors(module)
476
+ full_offload = (
477
+ offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
478
+ )
479
+
480
+ if len(list(directs)) > 0 or full_offload:
481
+ if weights_map is not None:
482
+ prefix = f"{module_name}." if len(module_name) > 0 else ""
483
+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)
484
+ else:
485
+ prefixed_weights_map = None
486
+ hook = AlignDevicesHook(
487
+ execution_device=execution_device,
488
+ offload=offload,
489
+ weights_map=prefixed_weights_map,
490
+ offload_buffers=offload_buffers,
491
+ place_submodules=full_offload,
492
+ skip_keys=skip_keys,
493
+ tied_params_map=tied_params_map,
494
+ )
495
+ add_hook_to_module(module, hook, append=True)
496
+
497
+ # We stop the recursion in case we hit the full offload.
498
+ if full_offload:
499
+ return
500
+
501
+ # Recurse on all children of the module.
502
+ for child_name, child in module.named_children():
503
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
504
+ attach_align_device_hook(
505
+ child,
506
+ execution_device=execution_device,
507
+ offload=offload,
508
+ weights_map=weights_map,
509
+ offload_buffers=offload_buffers,
510
+ module_name=child_name,
511
+ preload_module_classes=preload_module_classes,
512
+ skip_keys=skip_keys,
513
+ tied_params_map=tied_params_map,
514
+ )
515
+
516
+
517
+ def remove_hook_from_submodules(module: nn.Module):
518
+ """
519
+ Recursively removes all hooks attached on the submodules of a given model.
520
+
521
+ Args:
522
+ module (`torch.nn.Module`): The module on which to remove all hooks.
523
+ """
524
+ remove_hook_from_module(module)
525
+ for child in module.children():
526
+ remove_hook_from_submodules(child)
527
+
528
+
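# Illustrative sketch: one way `attach_align_device_hook` can be used with an offloaded
# weights map, and how `remove_hook_from_submodules` cleans up afterwards. This is a minimal
# example under stated assumptions (a tiny toy model, an in-memory `weights_map`, and a CPU
# execution device), not a definitive recipe.
def _example_attach_align_device_hook():
    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
    # Keep a full copy of the parameters; the hooks stream them back in at forward time.
    weights_map = {name: tensor.clone() for name, tensor in model.state_dict().items()}
    attach_align_device_hook(
        model,
        execution_device=torch.device("cpu"),
        offload=True,
        weights_map=weights_map,
        offload_buffers=True,
    )
    output = model(torch.randn(2, 8))
    remove_hook_from_submodules(model)
    return output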
529
+ def attach_align_device_hook_on_blocks(
530
+ module: nn.Module,
531
+ execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
532
+ offload: Union[bool, Dict[str, bool]] = False,
533
+ weights_map: Mapping = None,
534
+ offload_buffers: bool = False,
535
+ module_name: str = "",
536
+ skip_keys: Optional[Union[str, List[str]]] = None,
537
+ preload_module_classes: Optional[List[str]] = None,
538
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
539
+ ):
540
+ """
541
+ Attaches `AlignDevicesHook` to all blocks of a given model as needed.
542
+
543
+ Args:
544
+ module (`torch.nn.Module`):
545
+ The module where we want to attach the hooks.
546
+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):
547
+ The device on which inputs and model weights should be placed before the forward pass. It can be one device
548
+ for the whole module, or a dictionary mapping module name to device.
549
+ offload (`bool`, *optional*, defaults to `False`):
550
+ Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole
551
+ module, or a dictionary mapping module name to boolean.
552
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
553
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
554
+ offload_buffers (`bool`, *optional*, defaults to `False`):
555
+ Whether or not to include the associated module's buffers when offloading.
556
+ module_name (`str`, *optional*, defaults to `""`):
557
+ The name of the module.
558
+ skip_keys (`str` or `List[str]`, *optional*):
559
+ A list of keys to ignore when moving inputs or outputs between devices.
560
+ preload_module_classes (`List[str]`, *optional*):
561
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
562
+ of the forward. This should only be used for classes that have submodules which are registered but not
563
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
564
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
565
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
566
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
567
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
568
+ instead of duplicating memory.
569
+ """
570
+ # If one device and one offload, we've got one hook.
571
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):
572
+ if not offload:
573
+ hook = AlignDevicesHook(
574
+ execution_device=execution_device,
575
+ io_same_device=True,
576
+ skip_keys=skip_keys,
577
+ place_submodules=True,
578
+ tied_params_map=tied_params_map,
579
+ )
580
+ add_hook_to_module(module, hook)
581
+ else:
582
+ attach_align_device_hook(
583
+ module,
584
+ execution_device=execution_device,
585
+ offload=True,
586
+ weights_map=weights_map,
587
+ offload_buffers=offload_buffers,
588
+ module_name=module_name,
589
+ skip_keys=skip_keys,
590
+ tied_params_map=tied_params_map,
591
+ )
592
+ return
593
+
594
+ if not isinstance(execution_device, Mapping):
595
+ execution_device = {key: execution_device for key in offload.keys()}
596
+ if not isinstance(offload, Mapping):
597
+ offload = {key: offload for key in execution_device.keys()}
598
+
599
+ if module_name in execution_device and module_name in offload and not offload[module_name]:
600
+ hook = AlignDevicesHook(
601
+ execution_device=execution_device[module_name],
602
+ offload_buffers=offload_buffers,
603
+ io_same_device=(module_name == ""),
604
+ place_submodules=True,
605
+ skip_keys=skip_keys,
606
+ tied_params_map=tied_params_map,
607
+ )
608
+ add_hook_to_module(module, hook)
609
+ attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map)
610
+ elif module_name in execution_device and module_name in offload:
611
+ attach_align_device_hook(
612
+ module,
613
+ execution_device=execution_device[module_name],
614
+ offload=True,
615
+ weights_map=weights_map,
616
+ offload_buffers=offload_buffers,
617
+ module_name=module_name,
618
+ skip_keys=skip_keys,
619
+ preload_module_classes=preload_module_classes,
620
+ tied_params_map=tied_params_map,
621
+ )
622
+ if not hasattr(module, "_hf_hook"):
623
+ hook = AlignDevicesHook(
624
+ execution_device=execution_device[module_name],
625
+ io_same_device=(module_name == ""),
626
+ skip_keys=skip_keys,
627
+ tied_params_map=tied_params_map,
628
+ )
629
+ add_hook_to_module(module, hook)
630
+ attach_execution_device_hook(
631
+ module,
632
+ execution_device[module_name],
633
+ preload_module_classes=preload_module_classes,
634
+ skip_keys=skip_keys,
635
+ tied_params_map=tied_params_map,
636
+ )
637
+ elif module_name == "":
638
+ hook = AlignDevicesHook(
639
+ execution_device=execution_device.get(""),
640
+ io_same_device=True,
641
+ skip_keys=skip_keys,
642
+ tied_params_map=tied_params_map,
643
+ )
644
+ add_hook_to_module(module, hook)
645
+
646
+ for child_name, child in module.named_children():
647
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
648
+ attach_align_device_hook_on_blocks(
649
+ child,
650
+ execution_device=execution_device,
651
+ offload=offload,
652
+ weights_map=weights_map,
653
+ offload_buffers=offload_buffers,
654
+ module_name=child_name,
655
+ preload_module_classes=preload_module_classes,
656
+ skip_keys=skip_keys,
657
+ tied_params_map=tied_params_map,
658
+ )
659
+
660
+
661
+ class CpuOffload(ModelHook):
662
+ """
663
+ Offloads a model to the CPU until its forward pass is called. The model is not offloaded back to the CPU after
664
+ the forward pass; the user needs to call the `init_hook` method again to do so.
665
+
666
+ Args:
667
+ execution_device(`str`, `int` or `torch.device`, *optional*):
668
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
669
+ GPU 0 if there is a GPU, and finally to the CPU.
670
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
671
+ The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
672
+ passed, its offload method will be called just before the forward of the model to which this hook is
673
+ attached.
674
+ """
675
+
676
+ def __init__(
677
+ self,
678
+ execution_device: Optional[Union[str, int, torch.device]] = None,
679
+ prev_module_hook: Optional["UserCpuOffloadHook"] = None,
680
+ ):
681
+ self.prev_module_hook = prev_module_hook
682
+
683
+ self.execution_device = execution_device if execution_device is not None else PartialState().default_device
684
+
685
+ def init_hook(self, module):
686
+ return module.to("cpu")
687
+
688
+ def pre_forward(self, module, *args, **kwargs):
689
+ if self.prev_module_hook is not None:
690
+ self.prev_module_hook.offload()
691
+ module.to(self.execution_device)
692
+ return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
693
+
694
+
695
+ class UserCpuOffloadHook:
696
+ """
697
+ A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook
698
+ or remove it entirely.
699
+ """
700
+
701
+ def __init__(self, model, hook):
702
+ self.model = model
703
+ self.hook = hook
704
+
705
+ def offload(self):
706
+ self.hook.init_hook(self.model)
707
+
708
+ def remove(self):
709
+ remove_hook_from_module(self.model)
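# Illustrative sketch: chaining `CpuOffload` hooks through `UserCpuOffloadHook` so that only one
# model of a simple two-model pipeline sits on the execution device at a time. The toy models,
# the input shape and the "cuda:0" device are assumptions made for the example;
# `add_hook_to_module` is the helper defined earlier in this module.
def _example_chained_cpu_offload(device="cuda:0"):
    model_a = nn.Sequential(nn.Linear(16, 16))
    model_b = nn.Sequential(nn.Linear(16, 4))

    hook_a = CpuOffload(execution_device=device)
    add_hook_to_module(model_a, hook_a, append=True)
    user_hook_a = UserCpuOffloadHook(model_a, hook_a)

    # model_b asks the previous hook to offload model_a just before its own forward pass runs.
    hook_b = CpuOffload(execution_device=device, prev_module_hook=user_hook_a)
    add_hook_to_module(model_b, hook_b, append=True)
    user_hook_b = UserCpuOffloadHook(model_b, hook_b)

    output = model_b(model_a(torch.randn(1, 16)))
    user_hook_b.offload()  # manually move the last model back to the CPU when done
    return output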
llmeval-env/lib/python3.10/site-packages/accelerate/inference.py ADDED
@@ -0,0 +1,188 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from types import MethodType
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ from .state import PartialState
19
+ from .utils import (
20
+ calculate_maximum_sizes,
21
+ convert_bytes,
22
+ copy_tensor_to_devices,
23
+ ignorant_find_batch_size,
24
+ infer_auto_device_map,
25
+ is_pippy_available,
26
+ pad_input_tensors,
27
+ send_to_device,
28
+ )
29
+
30
+
31
+ if is_pippy_available():
32
+ from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
33
+ from pippy.PipelineStage import PipelineStage
34
+
35
+
36
+ def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
37
+ """
38
+ Calculates the device map for `model` with an offset for PiPPy
39
+ """
40
+ if num_processes == 1:
41
+ return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
42
+ if max_memory is None:
43
+ model_size, shared = calculate_maximum_sizes(model)
44
+
45
+ # Split into `n` chunks for each GPU
46
+ memory = (model_size + shared[0]) / num_processes
47
+ memory = convert_bytes(memory)
48
+ value, ending = memory.split(" ")
49
+
50
+ # Add a chunk to deal with potential extra shared memory instances
51
+ memory = math.ceil(float(value)) * 1.1
52
+ memory = f"{memory} {ending}"
53
+ max_memory = {i: memory for i in range(num_processes)}
54
+ device_map = infer_auto_device_map(
55
+ model,
56
+ max_memory=max_memory,
57
+ no_split_module_classes=no_split_module_classes,
58
+ clean_result=False,
59
+ )
60
+ return device_map
61
+
62
+
63
+ def find_pippy_batch_size(args, kwargs):
64
+ found_batch_size = None
65
+ if args is not None:
66
+ for arg in args:
67
+ found_batch_size = ignorant_find_batch_size(arg)
68
+ if found_batch_size is not None:
69
+ break
70
+ if kwargs is not None and found_batch_size is None:
71
+ for kwarg in kwargs.values():
72
+ found_batch_size = ignorant_find_batch_size(kwarg)
73
+ if found_batch_size is not None:
74
+ break
75
+ return found_batch_size
76
+
77
+
78
+ def build_pipeline(model, split_points, args, kwargs, num_chunks):
79
+ """
80
+ Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing
81
+ in needed `args` and `kwargs` as the model needs on the CPU.
82
+
83
+ Users can pass in custom `num_chunks` as an optional hyper-parameter. By default will use
84
+ `AcceleratorState.num_processes`
85
+ """
86
+ # We need to annotate the split points in the model for PiPPy
87
+ state = PartialState()
88
+ annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points})
89
+ found_batch_size = find_pippy_batch_size(args, kwargs)
90
+ if found_batch_size != num_chunks:
91
+ if args is not None:
92
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
93
+ if kwargs is not None:
94
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
95
+ pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs)
96
+ stage = PipelineStage(pipe, state.local_process_index, device=state.device)
97
+
98
+ return stage
99
+
100
+
101
+ def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
102
+ state = PartialState()
103
+ output = None
104
+
105
+ if state.num_processes == 1:
106
+ output = forward(*args, **kwargs)
107
+ elif state.is_local_main_process:
108
+ found_batch_size = find_pippy_batch_size(args, kwargs)
109
+ if found_batch_size is None:
110
+ raise ValueError("Could not find batch size from args or kwargs")
111
+ else:
112
+ if found_batch_size != num_chunks:
113
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
114
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
115
+ forward(*args, **kwargs)
116
+ elif state.is_last_process:
117
+ output = forward()
118
+ else:
119
+ forward()
120
+ if gather_output:
121
+ # Each node will get a copy of the full output which is only on the last GPU
122
+ output = copy_tensor_to_devices(output)
123
+ return output
124
+
125
+
126
+ def prepare_pippy(
127
+ model,
128
+ split_points: Optional[Union[str, List[str]]] = "auto",
129
+ no_split_module_classes: Optional[List[str]] = None,
130
+ example_args: Optional[Tuple[Any]] = (),
131
+ example_kwargs: Optional[Dict[str, Any]] = None,
132
+ num_chunks: Optional[int] = None,
133
+ gather_output: Optional[bool] = False,
134
+ ):
135
+ """
136
+ Wraps `model` for pipeline parallel inference.
137
+
138
+ Args:
139
+ model (`torch.nn.Module`):
140
+ A model we want to split for pipeline-parallel inference
141
+ split_points (`str` or `List[str]`, defaults to 'auto'):
142
+ How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced
143
+ split given any model. Should be a list of layer names in the model to split by otherwise.
144
+ no_split_module_classes (`List[str]`):
145
+ A list of class names for layers we don't want to be split.
146
+ example_args (tuple of model inputs):
147
+ The expected inputs for the model that uses order-based inputs. Recommended to use this method if possible.
148
+ example_kwargs (dict of model inputs)
149
+ The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure
150
+ that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition
151
+ is true for all cases.
152
+ num_chunks (`int`, defaults to the number of available GPUs):
153
+ The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but
154
+ this can be tuned and played with. In general one should have num_chunks >= num_gpus.
155
+ gather_output (`bool`, defaults to `False`):
156
+ If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
157
+ """
158
+ if not is_pippy_available():
159
+ raise ImportError(
160
+ "`pippy` was not found to be installed on your system. Please "
161
+ "install using `pip install torchpippy` or ensure you have at least version 0.2.0"
162
+ )
163
+ state = PartialState()
164
+ example_args = send_to_device(example_args, "cpu")
165
+ example_kwargs = send_to_device(example_kwargs, "cpu")
166
+ if num_chunks is None:
167
+ num_chunks = state.num_processes
168
+ if split_points == "auto":
169
+ device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
170
+ split_points = []
171
+ for i in range(1, num_chunks):
172
+ split_points.append(next(k for k, v in device_map.items() if v == i))
173
+ model.hf_split_points = split_points
174
+ stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
175
+ model._original_forward = model.forward
176
+ model._original_call = model.__call__
177
+ model.pippy_stage = stage
178
+ model.hf_split_points = split_points
179
+
180
+ def forward(*args, **kwargs):
181
+ return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs)
182
+
183
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
184
+ # Note: creates an infinite recursion loop with `generate`
185
+ model_forward = MethodType(forward, model)
186
+ forward.__wrapped__ = model_forward
187
+ model.forward = forward
188
+ return model
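# Illustrative sketch: the call pattern for `prepare_pippy` when the script is started with
# `accelerate launch` on two or more GPUs and `torchpippy` is installed. The `transformers`
# model name and the input shape are assumptions made for the example.
def _example_prepare_pippy():
    import torch
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("gpt2")
    model.eval()

    example_input = torch.randint(0, model.config.vocab_size, (2, 16))
    model = prepare_pippy(model, split_points="auto", example_args=(example_input,))

    with torch.no_grad():
        # The real output lives on the last process (or on every process when
        # `gather_output=True` is passed); other ranks receive `None`.
        output = model(example_input)
    return output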
llmeval-env/lib/python3.10/site-packages/accelerate/launchers.py ADDED
@@ -0,0 +1,258 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import sys
17
+ import tempfile
18
+
19
+ import torch
20
+
21
+ from .state import AcceleratorState, PartialState
22
+ from .utils import (
23
+ PrecisionType,
24
+ PrepareForLaunch,
25
+ are_libraries_initialized,
26
+ check_cuda_p2p_ib_support,
27
+ get_gpu_info,
28
+ is_mps_available,
29
+ patch_environment,
30
+ )
31
+
32
+
33
+ def test_launch():
34
+ "Verify a `PartialState` can be initialized."
35
+ _ = PartialState()
36
+
37
+
38
+ def notebook_launcher(
39
+ function,
40
+ args=(),
41
+ num_processes=None,
42
+ mixed_precision="no",
43
+ use_port="29500",
44
+ master_addr="127.0.0.1",
45
+ node_rank=0,
46
+ num_nodes=1,
47
+ ):
48
+ """
49
+ Launches a training function, using several processes or multiple nodes if it's possible in the current environment
50
+ (TPU with multiple cores for instance).
51
+
52
+ <Tip warning={true}>
53
+
54
+ To use this function, absolutely zero calls to a CUDA device must be made in the notebook session before calling it. If
55
+ any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability.
56
+
57
+ Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none
58
+ of those calls have been made.
59
+
60
+ </Tip>
61
+
62
+ Args:
63
+ function (`Callable`):
64
+ The training function to execute. If it accepts arguments, the first argument should be the index of the
65
+ process run.
66
+ args (`Tuple`):
67
+ Tuple of arguments to pass to the function (it will receive `*args`).
68
+ num_processes (`int`, *optional*):
69
+ The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to
70
+ the number of GPUs available otherwise.
71
+ mixed_precision (`str`, *optional*, defaults to `"no"`):
72
+ If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
73
+ use_port (`str`, *optional*, defaults to `"29500"`):
74
+ The port to use to communicate between processes when launching a multi-GPU training.
75
+ master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
76
+ The address to use for communication between processes.
77
+ node_rank (`int`, *optional*, defaults to 0):
78
+ The rank of the current node.
79
+ num_nodes (`int`, *optional*, defaults to 1):
80
+ The number of nodes to use for training.
81
+
82
+ Example:
83
+
84
+ ```python
85
+ # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
86
+ from accelerate import notebook_launcher
87
+
88
+
89
+ def train(*args):
90
+ # Your training function here
91
+ ...
92
+
93
+
94
+ notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
95
+ ```
96
+ """
97
+ # Are we in a google colab or a Kaggle Kernel?
98
+ in_colab = False
99
+ in_kaggle = False
100
+ if any(key.startswith("KAGGLE") for key in os.environ.keys()):
101
+ in_kaggle = True
102
+ elif "IPython" in sys.modules:
103
+ in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
104
+
105
+ try:
106
+ mixed_precision = PrecisionType(mixed_precision.lower())
107
+ except ValueError:
108
+ raise ValueError(
109
+ f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
110
+ )
111
+
112
+ if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
113
+ # TPU launch
114
+ import torch_xla.distributed.xla_multiprocessing as xmp
115
+
116
+ if len(AcceleratorState._shared_state) > 0:
117
+ raise ValueError(
118
+ "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
119
+ "your training function. Restart your notebook and make sure no cells initializes an "
120
+ "`Accelerator`."
121
+ )
122
+ if num_processes is None:
123
+ num_processes = 8
124
+
125
+ launcher = PrepareForLaunch(function, distributed_type="TPU")
126
+ print(f"Launching a training on {num_processes} TPU cores.")
127
+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
128
+ elif in_colab and get_gpu_info()[1] < 2:
129
+ # No need for a distributed launch otherwise as it's either CPU or one GPU.
130
+ if torch.cuda.is_available():
131
+ print("Launching training on one GPU.")
132
+ else:
133
+ print("Launching training on one CPU.")
134
+ function(*args)
135
+ else:
136
+ if num_processes is None:
137
+ raise ValueError(
138
+ "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
139
+ )
140
+ if node_rank >= num_nodes:
141
+ raise ValueError("The node_rank must be less than the number of nodes.")
142
+ if num_processes > 1:
143
+ # Multi-GPU launch
144
+ from torch.multiprocessing import start_processes
145
+ from torch.multiprocessing.spawn import ProcessRaisedException
146
+
147
+ if len(AcceleratorState._shared_state) > 0:
148
+ raise ValueError(
149
+ "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
150
+ "inside your training function. Restart your notebook and make sure no cells initializes an "
151
+ "`Accelerator`."
152
+ )
153
+ # Check for specific libraries known to initialize CUDA that users constantly use
154
+ problematic_imports = are_libraries_initialized("bitsandbytes")
155
+ if len(problematic_imports) > 0:
156
+ err = (
157
+ "Could not start distributed process. Libraries known to initialize CUDA upon import have been "
158
+ "imported already. Please keep these imports inside your training function to try and help with this:"
159
+ )
160
+ for lib_name in problematic_imports:
161
+ err += f"\n\t* `{lib_name}`"
162
+ raise RuntimeError(err)
163
+
164
+ patched_env = dict(
165
+ nproc=num_processes,
166
+ node_rank=node_rank,
167
+ world_size=num_nodes * num_processes,
168
+ master_addr=master_addr,
169
+ master_port=use_port,
170
+ mixed_precision=mixed_precision,
171
+ )
172
+
173
+ # Check for CUDA P2P and IB issues
174
+ if not check_cuda_p2p_ib_support():
175
+ patched_env["nccl_p2p_disable"] = "1"
176
+ patched_env["nccl_ib_disable"] = "1"
177
+
178
+ # torch.distributed will expect a few environment variables to be here. We set the ones common to each
179
+ # process here (the other ones will be set by the launcher).
180
+ with patch_environment(**patched_env):
181
+ # First dummy launch
182
+ if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":
183
+ launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU")
184
+ try:
185
+ start_processes(launcher, args=(), nprocs=num_processes, start_method="fork")
186
+ except ProcessRaisedException as e:
187
+ err = "An issue was found when verifying a stable environment for the notebook launcher."
188
+ if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
189
+ raise RuntimeError(
190
+ f"{err}"
191
+ "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
192
+ "Please review your imports and test them when running the `notebook_launcher()` to identify "
193
+ "which one is problematic and causing CUDA to be initialized."
194
+ ) from e
195
+ else:
196
+ raise RuntimeError(f"{err} The following error was raised: {e}") from e
197
+ # Now the actual launch
198
+ launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
199
+ print(f"Launching training on {num_processes} GPUs.")
200
+ try:
201
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
202
+ except ProcessRaisedException as e:
203
+ if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
204
+ raise RuntimeError(
205
+ "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
206
+ "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
207
+ "Please review your imports and test them when running the `notebook_launcher()` to identify "
208
+ "which one is problematic and causing CUDA to be initialized."
209
+ ) from e
210
+ else:
211
+ raise RuntimeError(f"An issue was found when launching the training: {e}") from e
212
+
213
+ else:
214
+ # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
215
+ if is_mps_available():
216
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
217
+ print("Launching training on MPS.")
218
+ elif torch.cuda.is_available():
219
+ print("Launching training on one GPU.")
220
+ else:
221
+ print("Launching training on CPU.")
222
+ function(*args)
223
+
224
+
225
+ def debug_launcher(function, args=(), num_processes=2):
226
+ """
227
+ Launches a training function using several processes on CPU for debugging purposes.
228
+
229
+ <Tip warning={true}>
230
+
231
+ This function is provided for internal testing and debugging, but it's not intended for real trainings. It will
232
+ only use the CPU.
233
+
234
+ </Tip>
235
+
236
+ Args:
237
+ function (`Callable`):
238
+ The training function to execute.
239
+ args (`Tuple`):
240
+ Tuple of arguments to pass to the function (it will receive `*args`).
241
+ num_processes (`int`, *optional*, defaults to 2):
242
+ The number of processes to use for training.
243
+ """
244
+ from torch.multiprocessing import start_processes
245
+
246
+ with tempfile.NamedTemporaryFile() as tmp_file:
247
+ # torch.distributed will expect a few environment variables to be here. We set the ones common to each
248
+ # process here (the other ones will be set by the launcher).
249
+ with patch_environment(
250
+ world_size=num_processes,
251
+ master_addr="127.0.0.1",
252
+ master_port="29500",
253
+ accelerate_mixed_precision="no",
254
+ accelerate_debug_rdv_file=tmp_file.name,
255
+ accelerate_use_cpu="yes",
256
+ ):
257
+ launcher = PrepareForLaunch(function, debug=True)
258
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
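# Illustrative sketch: the call pattern for `debug_launcher`, which spawns CPU-only worker
# processes to exercise distributed code paths. The tiny training function is a placeholder
# assumption made for the example.
def _example_debug_launch():
    def _tiny_training_function():
        from accelerate import Accelerator

        accelerator = Accelerator()
        accelerator.print(f"Hello from process {accelerator.process_index} of {accelerator.num_processes}")

    debug_launcher(_tiny_training_function, num_processes=2)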
llmeval-env/lib/python3.10/site-packages/accelerate/local_sgd.py ADDED
@@ -0,0 +1,102 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+
16
+ from accelerate import Accelerator, DistributedType
17
+
18
+
19
+ class LocalSGD:
20
+ """
21
+ A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently
22
+ on each device, and averages model weights every K synchronization step.
23
+
24
+ It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular,
25
+ this is a simple implementation that cannot support scenarios such as model parallelism.
26
+
27
+
28
+ Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
29
+ back to at least:
30
+
31
+ Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
32
+ arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)
33
+
34
+ We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).
35
+
36
+ Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
37
+ Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
38
+
39
+ """
40
+
41
+ def __enter__(self):
42
+ if self.enabled:
43
+ self.model_sync_obj = self.model.no_sync()
44
+ self.model_sync_obj.__enter__()
45
+
46
+ return self
47
+
48
+ def __exit__(self, type, value, tb):
49
+ if self.enabled:
50
+ # Average all models on exit
51
+ self._sync_and_avg_model_params()
52
+ self.model_sync_obj.__exit__(type, value, tb)
53
+
54
+ def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
55
+ """
56
+ Constructor.
57
+
58
+ Args:
59
+ model (`torch.nn.Module`):
60
+ The model whose parameters we need to average.
61
+ accelerator (`Accelerator`):
62
+ Accelerator object.
63
+ local_sgd_steps (`int`):
64
+ A number of local SGD steps (before model parameters are synchronized).
65
+ enabled (`bool):
66
+ Local SGD is disabled if this parameter set to `False`.
67
+ """
68
+ if accelerator.distributed_type not in [
69
+ DistributedType.NO,
70
+ DistributedType.MULTI_CPU,
71
+ DistributedType.MULTI_GPU,
72
+ DistributedType.MULTI_MLU,
73
+ DistributedType.MULTI_NPU,
74
+ ]:
75
+ raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
76
+ self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
77
+ self.num_steps = 0
78
+ if self.enabled:
79
+ self.accelerator = accelerator
80
+ self.model = model
81
+ self.local_sgd_steps = local_sgd_steps
82
+
83
+ def step(self):
84
+ """
85
+ This function makes a "step" and synchronizes model parameters if necessary.
86
+ """
87
+ self.num_steps += 1
88
+ if not self.enabled:
89
+ return
90
+
91
+ if self.num_steps % self.local_sgd_steps == 0:
92
+ self._sync_and_avg_model_params()
93
+
94
+ def _sync_and_avg_model_params(self):
95
+ """
96
+ Synchronize + Average model parameters across all GPUs
97
+ """
98
+
99
+ self.accelerator.wait_for_everyone()
100
+ with self.accelerator.autocast():
101
+ for param in self.model.parameters():
102
+ param.data = self.accelerator.reduce(param.data, reduction="mean")
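# Illustrative sketch: how `LocalSGD` is typically wrapped around a training loop. The model,
# optimizer, dataloader and loss function are assumed to have been passed through
# `accelerator.prepare(...)` already; the names here are placeholders.
def _example_local_sgd_loop(accelerator, model, optimizer, dataloader, loss_fn):
    with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
        for batch, targets in dataloader:
            optimizer.zero_grad()
            loss = loss_fn(model(batch), targets)
            accelerator.backward(loss)
            optimizer.step()
            # Averages the model parameters across processes every `local_sgd_steps` steps.
            local_sgd.step()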
llmeval-env/lib/python3.10/site-packages/accelerate/logging.py ADDED
@@ -0,0 +1,123 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ import logging
17
+ import os
18
+
19
+ from .state import PartialState
20
+
21
+
22
+ class MultiProcessAdapter(logging.LoggerAdapter):
23
+ """
24
+ An adapter to assist with logging in multiprocess.
25
+
26
+ `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
27
+ or only the main executed one. Default is `main_process_only=True`.
28
+
29
+ Does not require an `Accelerator` object to be created first.
30
+ """
31
+
32
+ @staticmethod
33
+ def _should_log(main_process_only):
34
+ "Check if log should be performed"
35
+ state = PartialState()
36
+ return not main_process_only or (main_process_only and state.is_main_process)
37
+
38
+ def log(self, level, msg, *args, **kwargs):
39
+ """
40
+ Delegates logger call after checking if we should log.
41
+
42
+ Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
43
+ or only the main executed one. Default is `True` if not passed
44
+
45
+ Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
46
+ read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
47
+ break with the previous behavior.
48
+
49
+ `in_order` is ignored if `main_process_only` is passed.
50
+ """
51
+ if PartialState._shared_state == {}:
52
+ raise RuntimeError(
53
+ "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
54
+ )
55
+ main_process_only = kwargs.pop("main_process_only", True)
56
+ in_order = kwargs.pop("in_order", False)
57
+
58
+ if self.isEnabledFor(level):
59
+ if self._should_log(main_process_only):
60
+ msg, kwargs = self.process(msg, kwargs)
61
+ self.logger.log(level, msg, *args, **kwargs)
62
+
63
+ elif in_order:
64
+ state = PartialState()
65
+ for i in range(state.num_processes):
66
+ if i == state.process_index:
67
+ msg, kwargs = self.process(msg, kwargs)
68
+ self.logger.log(level, msg, *args, **kwargs)
69
+ state.wait_for_everyone()
70
+
71
+ @functools.lru_cache(None)
72
+ def warning_once(self, *args, **kwargs):
73
+ """
74
+ This method is identical to `logger.warning()`, but will emit the warning with the same message only once
75
+
76
+ Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
77
+ cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to
78
+ switch to another type of cache that includes the caller frame information in the hashing function.
79
+ """
80
+ self.warning(*args, **kwargs)
81
+
82
+
83
+ def get_logger(name: str, log_level: str = None):
84
+ """
85
+ Returns a `logging.Logger` for `name` that can handle multiprocessing.
86
+
87
+ If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all
88
+ processes and in order, also pass `in_order=True`
89
+
90
+ Args:
91
+ name (`str`):
92
+ The name for the logger, such as `__file__`
93
+ log_level (`str`, *optional*):
94
+ The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not
95
+
96
+ Example:
97
+
98
+ ```python
99
+ >>> from accelerate.logging import get_logger
100
+ >>> from accelerate import Accelerator
101
+
102
+ >>> logger = get_logger(__name__)
103
+
104
+ >>> accelerator = Accelerator()
105
+ >>> logger.info("My log", main_process_only=False)
106
+ >>> logger.debug("My log", main_process_only=True)
107
+
108
+ >>> logger = get_logger(__name__, log_level="DEBUG")
109
+ >>> logger.info("My log")
110
+ >>> logger.debug("My second log")
111
+
112
+ >>> array = ["a", "b", "c", "d"]
113
+ >>> letter_at_rank = array[accelerator.process_index]
114
+ >>> logger.info(letter_at_rank, in_order=True)
115
+ ```
116
+ """
117
+ if log_level is None:
118
+ log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
119
+ logger = logging.getLogger(name)
120
+ if log_level is not None:
121
+ logger.setLevel(log_level.upper())
122
+ logger.root.setLevel(log_level.upper())
123
+ return MultiProcessAdapter(logger, {})
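# Illustrative sketch: `warning_once` emits an identical message only the first time it is
# logged, which is handy inside loops. Initializing `PartialState` first is required by the
# adapter; the log level and message are placeholder assumptions.
def _example_warning_once():
    PartialState()  # the adapter refuses to log before the accelerate state exists
    logger = get_logger(__name__, log_level="WARNING")
    for _ in range(3):
        logger.warning_once("This message is emitted a single time on the main process.")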
llmeval-env/lib/python3.10/site-packages/accelerate/memory_utils.py ADDED
@@ -0,0 +1,22 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+
17
+
18
+ warnings.warn(
19
+ "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
20
+ "`from accelerate import find_executable_batch_size` to avoid this warning.",
21
+ FutureWarning,
22
+ )
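# Illustrative sketch: the import this deprecation warning points to, using the decorator's
# documented pattern of halving the batch size and retrying on CUDA out-of-memory errors.
# The starting batch size and the body of the inner loop are placeholder assumptions.
def _example_find_executable_batch_size():
    from accelerate import Accelerator, find_executable_batch_size

    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=128)
    def inner_training_loop(batch_size):
        accelerator.print(f"Trying batch size {batch_size}")
        # ... build dataloaders and run the training loop with `batch_size` here ...

    inner_training_loop()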
llmeval-env/lib/python3.10/site-packages/accelerate/optimizer.py ADDED
@@ -0,0 +1,214 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ import warnings
17
+
18
+ import torch
19
+
20
+ from .state import AcceleratorState, GradientState
21
+ from .utils import DistributedType, honor_type, is_lomo_available, is_torch_xla_available
22
+
23
+
24
+ if is_torch_xla_available():
25
+ import torch_xla.core.xla_model as xm
26
+
27
+
28
+ def move_to_device(state, device):
29
+ if isinstance(state, (list, tuple)):
30
+ return honor_type(state, (move_to_device(t, device) for t in state))
31
+ elif isinstance(state, dict):
32
+ return type(state)({k: move_to_device(v, device) for k, v in state.items()})
33
+ elif isinstance(state, torch.Tensor):
34
+ return state.to(device)
35
+ return state
36
+
37
+
38
+ class AcceleratedOptimizer(torch.optim.Optimizer):
39
+ """
40
+ Internal wrapper around a torch optimizer.
41
+
42
+ Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
43
+ accumulation.
44
+
45
+ Args:
46
+ optimizer (`torch.optim.optimizer.Optimizer`):
47
+ The optimizer to wrap.
48
+ device_placement (`bool`, *optional*, defaults to `True`):
49
+ Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
50
+ `optimizer` on the right device.
51
+ scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
52
+ The scaler to use in the step function if training with mixed precision.
53
+ """
54
+
55
+ def __init__(self, optimizer, device_placement=True, scaler=None):
56
+ self.optimizer = optimizer
57
+ self.scaler = scaler
58
+ self.accelerator_state = AcceleratorState()
59
+ self.gradient_state = GradientState()
60
+ self.device_placement = device_placement
61
+ self._is_overflow = False
62
+
63
+ if self.scaler is not None:
64
+ self._accelerate_step_called = False
65
+ self._optimizer_original_step_method = self.optimizer.step
66
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
67
+
68
+ # Handle device placement
69
+ if device_placement:
70
+ state_dict = self.optimizer.state_dict()
71
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
72
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
73
+ else:
74
+ state_dict = move_to_device(state_dict, self.accelerator_state.device)
75
+ self.optimizer.load_state_dict(state_dict)
76
+
77
+ @property
78
+ def state(self):
79
+ return self.optimizer.state
80
+
81
+ @state.setter
82
+ def state(self, state):
83
+ self.optimizer.state = state
84
+
85
+ @property
86
+ def param_groups(self):
87
+ return self.optimizer.param_groups
88
+
89
+ @param_groups.setter
90
+ def param_groups(self, param_groups):
91
+ self.optimizer.param_groups = param_groups
92
+
93
+ @property
94
+ def defaults(self):
95
+ return self.optimizer.defaults
96
+
97
+ @defaults.setter
98
+ def defaults(self, defaults):
99
+ self.optimizer.defaults = defaults
100
+
101
+ def add_param_group(self, param_group):
102
+ self.optimizer.add_param_group(param_group)
103
+
104
+ def load_state_dict(self, state_dict):
105
+ if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement:
106
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
107
+ self.optimizer.load_state_dict(state_dict)
108
+
109
+ def state_dict(self):
110
+ return self.optimizer.state_dict()
111
+
112
+ def zero_grad(self, set_to_none=None):
113
+ if self.gradient_state.sync_gradients:
114
+ accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
115
+ if accept_arg:
116
+ if set_to_none is None:
117
+ set_to_none = True
118
+ self.optimizer.zero_grad(set_to_none=set_to_none)
119
+ else:
120
+ if set_to_none is not None:
121
+ raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.")
122
+ self.optimizer.zero_grad()
123
+
124
+ def train(self):
125
+ """
126
+ Sets the optimizer to "train" mode. Useful for optimizers like `schedule_free`
127
+ """
128
+ return self.optimizer.train()
129
+
130
+ def eval(self):
131
+ """
132
+ Sets the optimizer to "eval" mode. Useful for optimizers like `schedule_free`
133
+ """
134
+ return self.optimizer.eval()
135
+
136
+ def step(self, closure=None):
137
+ if is_lomo_available():
138
+ from lomo_optim import AdaLomo, Lomo
139
+
140
+ if (
141
+ not self.gradient_state.is_xla_gradients_synced
142
+ and self.accelerator_state.distributed_type == DistributedType.XLA
143
+ ):
144
+ gradients = xm._fetch_gradients(self.optimizer)
145
+ xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
146
+ self.gradient_state.is_xla_gradients_synced = True
147
+
148
+ if is_lomo_available():
149
+ # `step` should be a no-op for LOMO optimizers.
150
+ if isinstance(self.optimizer, (Lomo, AdaLomo)):
151
+ return
152
+
153
+ if self.gradient_state.sync_gradients:
154
+ if self.scaler is not None:
155
+ self.optimizer.step = self._optimizer_patched_step_method
156
+
157
+ self.scaler.step(self.optimizer, closure)
158
+ self.scaler.update()
159
+
160
+ if not self._accelerate_step_called:
161
+ # If the optimizer step was skipped, gradient overflow was detected.
162
+ self._is_overflow = True
163
+ else:
164
+ self._is_overflow = False
165
+ # Reset the step method to the original one
166
+ self.optimizer.step = self._optimizer_original_step_method
167
+ # Reset the indicator
168
+ self._accelerate_step_called = False
169
+ else:
170
+ self.optimizer.step(closure)
171
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
172
+ self.gradient_state.is_xla_gradients_synced = False
173
+
174
+ def _switch_parameters(self, parameters_map):
175
+ for param_group in self.optimizer.param_groups:
176
+ param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
177
+
178
+ @property
179
+ def is_overflow(self):
180
+ """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
181
+ warnings.warn(
182
+ "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use "
183
+ "`optimizer.step_was_skipped` instead.",
184
+ FutureWarning,
185
+ )
186
+ return self._is_overflow
187
+
188
+ @property
189
+ def step_was_skipped(self):
190
+ """Whether or not the optimizer step was skipped."""
191
+ return self._is_overflow
192
+
193
+ def __getstate__(self):
194
+ _ignored_keys = [
195
+ "_accelerate_step_called",
196
+ "_optimizer_original_step_method",
197
+ "_optimizer_patched_step_method",
198
+ ]
199
+ return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
200
+
201
+ def __setstate__(self, state):
202
+ self.__dict__.update(state)
203
+ if self.scaler is not None:
204
+ self._accelerate_step_called = False
205
+ self._optimizer_original_step_method = self.optimizer.step
206
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
207
+
208
+
209
+ def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
210
+ def patched_step(*args, **kwargs):
211
+ accelerated_optimizer._accelerate_step_called = True
212
+ return method(*args, **kwargs)
213
+
214
+ return patched_step
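# Illustrative sketch: `AcceleratedOptimizer` is what `accelerator.prepare(...)` returns for the
# optimizers it wraps; this shows how `step_was_skipped` can be checked after a mixed-precision
# step. All arguments are placeholder assumptions.
def _example_accelerated_optimizer_step(accelerator, model, optimizer, batch, targets, loss_fn):
    loss = loss_fn(model(batch), targets)
    accelerator.backward(loss)
    optimizer.step()
    if optimizer.step_was_skipped:
        # With fp16, the GradScaler skips a step when gradient overflow is detected.
        accelerator.print("Gradient overflow detected; the optimizer step was skipped.")
    optimizer.zero_grad()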
llmeval-env/lib/python3.10/site-packages/accelerate/scheduler.py ADDED
@@ -0,0 +1,98 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
16
+
17
+ import warnings
18
+
19
+ from .state import AcceleratorState, GradientState
20
+
21
+
22
+ warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
23
+
24
+
25
+ class AcceleratedScheduler:
26
+ """
27
+ A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
28
+ to avoid stepping the scheduler too often when gradients overflowed and no training step was performed (in mixed
29
+ precision training).
30
+
31
+ When performing gradient accumulation, scheduler lengths should not be changed accordingly; Accelerate will always
32
+ step the scheduler to account for it.
33
+
34
+ Args:
35
+ scheduler (`torch.optim.lr_scheduler._LRScheduler`):
36
+ The scheduler to wrap.
37
+ optimizers (one or a list of `torch.optim.Optimizer`):
38
+ The optimizers used.
39
+ step_with_optimizer (`bool`, *optional*, defaults to `True`):
40
+ Whether or not the scheduler should be stepped at each optimizer step.
41
+ split_batches (`bool`, *optional*, defaults to `False`):
42
+ Whether or not the dataloaders split one batch across the different processes (so batch size is the same
43
+ regardless of the number of processes) or create batches on each process (so batch size is the original
44
+ batch size multiplied by the number of processes).
45
+ """
46
+
47
+ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
48
+ self.scheduler = scheduler
49
+ self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
50
+ self.split_batches = split_batches
51
+ self.step_with_optimizer = step_with_optimizer
52
+ self.gradient_state = GradientState()
53
+
54
+ def step(self, *args, **kwargs):
55
+ if not self.step_with_optimizer:
56
+ # No link between scheduler and optimizer -> just step
57
+ self.scheduler.step(*args, **kwargs)
58
+ return
59
+
60
+ # Otherwise, first make sure the optimizer was stepped.
61
+ if not self.gradient_state.sync_gradients:
62
+ if self.gradient_state.adjust_scheduler:
63
+ self.scheduler._step_count += 1
64
+ return
65
+
66
+ for opt in self.optimizers:
67
+ if opt.step_was_skipped:
68
+ return
69
+ if self.split_batches:
70
+ # Split batches -> the training dataloader batch size is not changed so one step per training step
71
+ self.scheduler.step(*args, **kwargs)
72
+ else:
73
+ # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
74
+ # num_processes steps per training step
75
+ num_processes = AcceleratorState().num_processes
76
+ for _ in range(num_processes):
77
+ # Special case when using OneCycle and `drop_last` was not used
78
+ if hasattr(self.scheduler, "total_steps"):
79
+ if self.scheduler._step_count <= self.scheduler.total_steps:
80
+ self.scheduler.step(*args, **kwargs)
81
+ else:
82
+ self.scheduler.step(*args, **kwargs)
83
+
84
+ # Passthroughs
85
+ def get_last_lr(self):
86
+ return self.scheduler.get_last_lr()
87
+
88
+ def state_dict(self):
89
+ return self.scheduler.state_dict()
90
+
91
+ def load_state_dict(self, state_dict):
92
+ self.scheduler.load_state_dict(state_dict)
93
+
94
+ def get_lr(self):
95
+ return self.scheduler.get_lr()
96
+
97
+ def print_lr(self, *args, **kwargs):
98
+ return self.scheduler.print_lr(*args, **kwargs)
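# Illustrative sketch: the behaviour this wrapper provides when combined with gradient
# accumulation, where the underlying scheduler is only stepped once gradients are synchronized
# and the optimizer step was not skipped. All objects are assumed to come from
# `accelerator.prepare(...)` and are placeholders.
def _example_scheduler_with_accumulation(accelerator, model, optimizer, scheduler, dataloader, loss_fn):
    for batch, targets in dataloader:
        with accelerator.accumulate(model):
            loss = loss_fn(model(batch), targets)
            accelerator.backward(loss)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()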
llmeval-env/lib/python3.10/site-packages/accelerate/state.py ADDED
@@ -0,0 +1,1208 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import logging
18
+ import math
19
+ import os
20
+ import threading
21
+ import warnings
22
+ from contextlib import contextmanager
23
+ from functools import partial
24
+ from typing import Any, Callable, Optional
25
+
26
+ import torch
27
+
28
+ from .utils import (
29
+ DistributedType,
30
+ DynamoBackend,
31
+ GradientAccumulationPlugin,
32
+ check_cuda_p2p_ib_support,
33
+ check_fp8_capability,
34
+ get_ccl_version,
35
+ get_cpu_distributed_information,
36
+ get_int_from_env,
37
+ is_ccl_available,
38
+ is_datasets_available,
39
+ is_deepspeed_available,
40
+ is_fp8_available,
41
+ is_ipex_available,
42
+ is_mlu_available,
43
+ is_mps_available,
44
+ is_npu_available,
45
+ is_torch_xla_available,
46
+ is_xpu_available,
47
+ parse_choice_from_env,
48
+ parse_flag_from_env,
49
+ set_numa_affinity,
50
+ )
51
+ from .utils.dataclasses import SageMakerDistributedType
52
+
53
+
54
+ if is_torch_xla_available():
55
+ import torch_xla.core.xla_model as xm
56
+
57
+ if is_mlu_available(check_device=False):
58
+ import torch_mlu # noqa: F401
59
+
60
+ if is_npu_available(check_device=False):
61
+ import torch_npu # noqa: F401
62
+
63
+ logger = logging.getLogger(__name__)
64
+
65
+
66
+ def is_initialized() -> bool:
67
+ """
68
+ Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
69
+ but works as a module method.
70
+ """
71
+ return AcceleratorState._shared_state != {}
72
+
73
+
74
+ # Lambda function that does nothing
75
+ def do_nothing(*args, **kwargs):
76
+ return None
77
+
78
+
79
+ class ThreadLocalSharedDict(threading.local):
80
+ """
81
+ Descriptor that holds a dict shared between instances of a class in the same thread.
82
+
83
+ Note: Descriptors have slightly different semantics than just a dict field on its own.
84
+ `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
85
+ underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
86
+ the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
87
+ object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
88
+
89
+ See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
90
+
91
+ This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
92
+
93
+ See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
94
+ """
95
+
96
+ def __init__(self, thread_local: bool = False):
97
+ self._storage = {}
98
+
99
+ def __get__(self, obj, objtype=None):
100
+ return self._storage
101
+
102
+ def __set__(self, obj, value):
103
+ self._storage = value
104
+
105
+
106
+ # Prefer global shared dictionary, except when using TPU.
107
+ SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
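The descriptor semantics called out in the docstring above are subtle, so here is a small self-contained sketch (a toy re-implementation for illustration, not accelerate's own objects) showing why the shared dict must be cleared in place rather than reassigned on the class:

```python
import threading


class ToySharedDict(threading.local):
    """Toy version of the descriptor above, for illustration only."""

    def __init__(self, thread_local: bool = False):
        self._storage = {}

    def __get__(self, obj, objtype=None):
        return self._storage

    def __set__(self, obj, value):
        self._storage = value


class Borg:
    _shared_state = ToySharedDict()


a, b = Borg(), Borg()
a._shared_state["x"] = 1
assert b._shared_state["x"] == 1        # instances share the same underlying dict
a._shared_state = {"y": 2}              # goes through __set__, still shared
assert b._shared_state == {"y": 2}

# `Borg._shared_state = {}` would replace the descriptor itself with a plain dict,
# which is why the real code resets state with an in-place `.clear()` instead:
Borg._shared_state.clear()
assert a._shared_state == {} and b._shared_state == {}
```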
108
+
109
+
110
+ # Inspired by Alex Martelli's 'Borg'.
111
+ class PartialState:
112
+ """
113
+ Singleton class that has information about the current training environment and functions to help with process
114
+ control. Designed to be used when only process control and device execution states are needed. Does *not* need to
115
+ be initialized from `Accelerator`.
116
+
117
+ Args:
118
+ cpu (`bool`, *optional*):
119
+ Whether or not to force the script to execute on CPU. Will ignore any accelerators available if set to
120
+ `True` and force the execution on the CPU.
121
+ kwargs (additional keyword arguments, *optional*):
122
+ Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
123
+ found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.
124
+
125
+ **Available attributes:**
126
+
127
+ - **device** (`torch.device`) -- The device to use.
128
+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
129
+ in use.
130
+ - **local_process_index** (`int`) -- The index of the current process on the current server.
131
+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
132
+ of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16', or 'fp8').
133
+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.
134
+ - **process_index** (`int`) -- The index of the current process.
135
+ - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
136
+ - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
137
+ - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
138
+ - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
139
+
140
+ Example:
141
+ ```python
142
+ from accelerate.utils import InitProcessGroupKwargs
143
+
144
+ # To include `InitProcessGroupKwargs`, init then call `.to_kwargs()`
145
+ kwargs = InitProcessGroupKwargs(...).to_kwargs()
146
+ state = PartialState(**kwargs)
147
+ ```
148
+ """
149
+
150
+ _shared_state = SharedDict()
151
+ _known_attrs = [
152
+ "_cpu",
153
+ "_mixed_precision",
154
+ "_shared_state",
155
+ "backend",
156
+ "debug",
157
+ "device",
158
+ "distributed_type",
159
+ "fork_launched",
160
+ "local_process_index",
161
+ "num_processes",
162
+ "process_index",
163
+ ]
164
+
165
+ def __init__(self, cpu: bool = False, **kwargs):
166
+ self.__dict__ = self._shared_state
167
+ if not self.initialized:
168
+ self._cpu = cpu
169
+ self.backend = None
170
+ env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
171
+ self.device = torch.device(env_device) if env_device is not None else None
172
+ self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
173
+ use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
174
+ dist_information = None
175
+ if use_sagemaker_dp is None:
176
+ use_sagemaker_dp = (
177
+ os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
178
+ and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
179
+ )
180
+
181
+ # Sets up self.backend + imports
182
+ original_backend = kwargs.pop("backend", None)
183
+ backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, original_backend)
184
+ if original_backend is not None and backend != original_backend:
185
+ raise ValueError(f"Your assigned backend {original_backend} is not available, please use {backend}")
186
+ self.backend = backend
187
+ self.distributed_type = distributed_type
188
+ use_deepspeed = False
189
+ if not cpu and self.backend != "xla":
190
+ if int(os.environ.get("LOCAL_RANK", -1)) != -1:
191
+ # Deal with spawning deepspeed
192
+ if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true":
193
+ if not is_deepspeed_available():
194
+ raise ImportError(
195
+ "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
196
+ )
197
+ from deepspeed import comm as dist
198
+
199
+ if is_xpu_available() and is_ccl_available():
200
+ os.environ["CCL_PROCESS_LAUNCHER"] = "none"
201
+ os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
202
+ os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
203
+
204
+ if not dist.is_initialized():
205
+ dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
206
+ # We need to set `use_deepspeed` to True to override `distributed_type` later
207
+ use_deepspeed = True
208
+ # Deal with all other backends but XPU and CPU, which get handled specially later
209
+ elif (
210
+ self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU)
211
+ and not torch.distributed.is_initialized()
212
+ ):
213
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
214
+ # XPU and CPU require special env configs to be set
215
+ if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
216
+ dist_information = get_cpu_distributed_information()
217
+ os.environ["RANK"] = str(dist_information.rank)
218
+ os.environ["WORLD_SIZE"] = str(dist_information.world_size)
219
+ os.environ["LOCAL_RANK"] = str(dist_information.local_rank)
220
+ os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size)
221
+ if self.backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU:
222
+ os.environ["CCL_PROCESS_LAUNCHER"] = "none"
223
+ os.environ["CCL_LOCAL_SIZE"] = os.environ["LOCAL_WORLD_SIZE"]
224
+ os.environ["CCL_LOCAL_RANK"] = os.environ["LOCAL_RANK"]
225
+ if not os.environ.get("MASTER_PORT", None):
226
+ os.environ["MASTER_PORT"] = "29500"
227
+ if (
228
+ not os.environ.get("MASTER_ADDR", None)
229
+ and dist_information.local_world_size != dist_information.world_size
230
+ and self.backend != "mpi"
231
+ ):
232
+ raise ValueError(
233
+ "Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, "
234
+ "please try exporting rank 0's hostname as `MASTER_ADDR`"
235
+ )
236
+ kwargs["rank"] = dist_information.rank
237
+ kwargs["world_size"] = dist_information.world_size
238
+
239
+ if (
240
+ self.distributed_type == DistributedType.MULTI_CPU
241
+ and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
242
+ ):
243
+ import psutil
244
+
245
+ num_cpu_threads_per_process = int(
246
+ psutil.cpu_count(logical=False) / dist_information.local_world_size
247
+ )
248
+ if num_cpu_threads_per_process == 0:
249
+ num_cpu_threads_per_process = 1
250
+ torch.set_num_threads(num_cpu_threads_per_process)
251
+ warnings.warn(
252
+ f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob"
253
+ " performance."
254
+ )
255
+
256
+ if not torch.distributed.is_initialized():
257
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
258
+
259
+ # No backend == no distributed training
260
+ if self.backend is None:
261
+ self.distributed_type = DistributedType.NO
262
+ self.num_processes = 1
263
+ self.process_index = 0
264
+ self.local_process_index = 0
265
+ elif self.backend == "xla":
266
+ # XLA needs device setting first for `set_replication`
267
+ self.set_device()
268
+ xm.set_replication(self.device, xm.get_xla_supported_devices())
269
+ self.num_processes = xm.xrt_world_size()
270
+ self.process_index = xm.get_ordinal()
271
+ if is_torch_xla_available(check_is_tpu=True):
272
+ self.local_process_index = xm.get_local_ordinal()
273
+ else:
274
+ self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
275
+ else:
276
+ self.num_processes = torch.distributed.get_world_size()
277
+ self.process_index = torch.distributed.get_rank()
278
+ self.local_process_index = (
279
+ int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank
280
+ )
281
+ self.set_device()
282
+ # Now we can change to deepspeed
283
+ if use_deepspeed:
284
+ self.distributed_type = DistributedType.DEEPSPEED
285
+
286
+ # Set CPU affinity if enabled
287
+ if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False):
288
+ set_numa_affinity(self.local_process_index)
289
+
290
+ # Check for old RTX 4000's that can't use P2P or IB and are on old drivers
291
+ if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
292
+ if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
293
+ raise NotImplementedError(
294
+ "The RTX 4000 series doesn't support faster communication bandwidth via P2P or IB. "
295
+ 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1"` or use `accelerate launch` which '
296
+ "will do this automatically."
297
+ )
298
+ # Important: This should be the *only* code outside of the `if not self.initialized` block!
299
+ self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
300
+
301
+ def __repr__(self) -> str:
302
+ return (
303
+ f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
304
+ f"Num processes: {self.num_processes}\n"
305
+ f"Process index: {self.process_index}\n"
306
+ f"Local process index: {self.local_process_index}\n"
307
+ f"Device: {self.device}\n"
308
+ )
309
+
310
+ @staticmethod
311
+ def _reset_state():
312
+ "Resets `_shared_state`; used internally and should not be called by users."
313
+ PartialState._shared_state.clear()
314
+
315
+ @property
316
+ def initialized(self) -> bool:
317
+ "Returns whether the `PartialState` has been initialized"
318
+ return self._shared_state != {}
319
+
320
+ @property
321
+ def use_distributed(self):
322
+ """
323
+ Whether the Accelerator is configured for distributed training
324
+ """
325
+ return self.distributed_type != DistributedType.NO and self.num_processes > 1
326
+
327
+ @property
328
+ def is_last_process(self) -> bool:
329
+ "Returns whether the current process is the last one"
330
+ return self.process_index == self.num_processes - 1
331
+
332
+ @property
333
+ def is_main_process(self) -> bool:
334
+ "Returns whether the current process is the main process"
335
+ return (
336
+ self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
337
+ )
338
+
339
+ @property
340
+ def is_local_main_process(self) -> bool:
341
+ "Returns whether the current process is the main process on the local node"
342
+ return (
343
+ self.local_process_index == 0
344
+ if self.distributed_type != DistributedType.MEGATRON_LM
345
+ else self.is_last_process
346
+ )
347
+
348
+ def wait_for_everyone(self):
349
+ """
350
+ Will stop the execution of the current process until every other process has reached that point (so this does
351
+ nothing when the script is only run in one process). Useful to do before saving a model.
352
+
353
+ Example:
354
+
355
+ ```python
356
+ >>> # Assuming two GPU processes
357
+ >>> import time
358
+ >>> from accelerate.state import PartialState
359
+
360
+ >>> state = PartialState()
361
+ >>> if state.is_main_process:
362
+ ... time.sleep(2)
363
+ ... else:
364
+ ... print("I'm waiting for the main process to finish its sleep...")
365
+ >>> state.wait_for_everyone()
366
+ >>> # Should print on every process at the same time
367
+ >>> print("Everyone is here")
368
+ ```
369
+ """
370
+ if self.distributed_type in (
371
+ DistributedType.MULTI_GPU,
372
+ DistributedType.MULTI_MLU,
373
+ DistributedType.MULTI_NPU,
374
+ DistributedType.MULTI_XPU,
375
+ DistributedType.MULTI_CPU,
376
+ DistributedType.DEEPSPEED,
377
+ DistributedType.FSDP,
378
+ ):
379
+ torch.distributed.barrier()
380
+ elif self.distributed_type == DistributedType.XLA:
381
+ xm.rendezvous("accelerate.utils.wait_for_everyone")
382
+
383
+ def _goes_first(self, is_main: bool):
384
+ if not is_main:
385
+ self.wait_for_everyone()
386
+
387
+ yield
388
+
389
+ if is_main:
390
+ self.wait_for_everyone()
391
+
392
+ @contextmanager
393
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
394
+ """
395
+ Splits `inputs` between `self.num_processes` quickly, so each portion can then be used on that process. Useful when doing
396
+ distributed inference, such as with different prompts.
397
+
398
+ Note that when using a `dict`, all keys need to have the same number of elements.
399
+
400
+ Args:
401
+ inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`):
402
+ The input to split between processes.
403
+ apply_padding (`bool`, `optional`, defaults to `False`):
404
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
405
+ number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
406
+ in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
407
+
408
+
409
+ Example:
410
+
411
+ ```python
412
+ # Assume there are two processes
413
+ from accelerate import PartialState
414
+
415
+ state = PartialState()
416
+ with state.split_between_processes(["A", "B", "C"]) as inputs:
417
+ print(inputs)
418
+ # Process 0
419
+ ["A", "B"]
420
+ # Process 1
421
+ ["C"]
422
+
423
+ with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
424
+ print(inputs)
425
+ # Process 0
426
+ ["A", "B"]
427
+ # Process 1
428
+ ["C", "C"]
429
+ ```
430
+ """
431
+ if self.num_processes == 1:
432
+ yield inputs
433
+ return
434
+ length = len(inputs)
435
+ # Nested dictionary of any types
436
+ if isinstance(inputs, dict):
437
+ length = len(inputs[list(inputs.keys())[0]])
438
+ if not all(len(v) == length for v in inputs.values()):
439
+ raise ValueError("All values in the dictionary must have the same length")
440
+ num_samples_per_process = math.ceil(length / self.num_processes)
441
+ start_index = self.process_index * num_samples_per_process
442
+ end_index = start_index + num_samples_per_process
443
+ if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
444
+ end_index = length
445
+
446
+ def _split_values(inputs, start_index, end_index):
447
+ if isinstance(inputs, (list, tuple, torch.Tensor)):
448
+ if start_index >= len(inputs):
449
+ result = inputs[-1:]
450
+ else:
451
+ result = inputs[start_index:end_index]
452
+ if apply_padding:
453
+ if isinstance(result, torch.Tensor):
454
+ from accelerate.utils import pad_across_processes, send_to_device
455
+
456
+ # The tensor needs to be on the device before we can pad it
457
+ tensorized_result = send_to_device(result, self.device)
458
+ result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
459
+ else:
460
+ result += [result[-1]] * (num_samples_per_process - len(result))
461
+ return result
462
+ elif isinstance(inputs, dict):
463
+ for key in inputs.keys():
464
+ inputs[key] = _split_values(inputs[key], start_index, end_index)
465
+ return inputs
466
+ else:
467
+ if is_datasets_available():
468
+ from datasets import Dataset
469
+
470
+ if isinstance(inputs, Dataset):
471
+ if start_index >= len(inputs):
472
+ start_index = len(inputs) - 1
473
+ if end_index > len(inputs):
474
+ end_index = len(inputs)
475
+ result_idcs = list(range(start_index, end_index))
476
+ if apply_padding:
477
+ result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs))
478
+ return inputs.select(result_idcs)
479
+ return inputs
480
+
481
+ yield _split_values(inputs, start_index, end_index)
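To make the slicing rule above concrete, here is a hedged, standalone sketch of the same index arithmetic for a hypothetical 3-process run over 7 inputs (it mirrors the math in `split_between_processes` rather than calling it):

```python
import math

inputs = ["a", "b", "c", "d", "e", "f", "g"]   # 7 items
num_processes = 3
per_process = math.ceil(len(inputs) / num_processes)  # 3

for process_index in range(num_processes):
    start = process_index * per_process
    end = start + per_process
    # the last process absorbs the remainder, as in the code above
    if len(inputs) % num_processes != 0 and process_index == num_processes - 1:
        end = len(inputs)
    chunk = inputs[start:end] if start < len(inputs) else inputs[-1:]
    print(process_index, chunk)
# 0 ['a', 'b', 'c']
# 1 ['d', 'e', 'f']
# 2 ['g']        (with apply_padding=True this becomes ['g', 'g', 'g'])
```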
482
+
483
+ @contextmanager
484
+ def main_process_first(self):
485
+ """
486
+ Lets the main process go first inside a with block.
487
+
488
+ The other processes will enter the with block after the main process exits.
489
+
490
+ Example:
491
+
492
+ ```python
493
+ >>> from accelerate import Accelerator
494
+
495
+ >>> accelerator = Accelerator()
496
+ >>> with accelerator.main_process_first():
497
+ ... # This will be printed first by process 0 then in a seemingly
498
+ ... # random order by the other processes.
499
+ ... print(f"This will be printed by process {accelerator.process_index}")
500
+ ```
501
+ """
502
+ yield from self._goes_first(self.is_main_process)
503
+
504
+ @contextmanager
505
+ def local_main_process_first(self):
506
+ """
507
+ Lets the local main process go first inside a with block.
508
+
509
+ The other processes will enter the with block after the main process exits.
510
+
511
+ Example:
512
+
513
+ ```python
514
+ >>> from accelerate.state import PartialState
515
+
516
+ >>> state = PartialState()
517
+ >>> with state.local_main_process_first():
518
+ ... # This will be printed first by local process 0 then in a seemingly
519
+ ... # random order by the other processes.
520
+ ... print(f"This will be printed by process {state.local_process_index}")
521
+ ```
522
+ """
523
+ yield from self._goes_first(self.is_local_main_process)
524
+
525
+ def on_main_process(self, function: Callable[..., Any] = None):
526
+ """
527
+ Decorator that only runs the decorated function on the main process.
528
+
529
+ Args:
530
+ function (`Callable`): The function to decorate.
531
+
532
+ Example:
533
+
534
+ ```python
535
+ >>> from accelerate.state import PartialState
536
+
537
+ >>> state = PartialState()
538
+
539
+
540
+ >>> @state.on_main_process
541
+ ... def print_something():
542
+ ... print("This will be printed by process 0 only.")
543
+
544
+
545
+ >>> print_something()
546
+ "This will be printed by process 0 only"
547
+ ```
548
+ """
549
+ if not self.initialized:
550
+ raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.")
551
+ if self.is_main_process or not self.use_distributed:
552
+ return function
553
+ return do_nothing
554
+
555
+ def on_local_main_process(self, function: Callable[..., Any] = None):
556
+ """
557
+ Decorator that only runs the decorated function on the local main process.
558
+
559
+ Args:
560
+ function (`Callable`): The function to decorate.
561
+
562
+ Example:
563
+ ```python
564
+ # Assume we have 2 servers with 4 processes each.
565
+ from accelerate.state import PartialState
566
+
567
+ state = PartialState()
568
+
569
+
570
+ @state.on_local_main_process
571
+ def print_something():
572
+ print("This will be printed by process 0 only on each server.")
573
+
574
+
575
+ print_something()
576
+ # On server 1:
577
+ "This will be printed by process 0 only"
578
+ # On server 2:
579
+ "This will be printed by process 0 only"
580
+ ```
581
+ """
582
+ if self.is_local_main_process or not self.use_distributed:
583
+ return function
584
+ return do_nothing
585
+
586
+ def on_last_process(self, function: Callable[..., Any]):
587
+ """
588
+ Decorator that only runs the decorated function on the last process.
589
+
590
+ Args:
591
+ function (`Callable`): The function to decorate.
592
+
593
+ Example:
594
+ ```python
595
+ # Assume we have 4 processes.
596
+ from accelerate.state import PartialState
597
+
598
+ state = PartialState()
599
+
600
+
601
+ @state.on_last_process
602
+ def print_something():
603
+ print(f"Printed on process {state.process_index}")
604
+
605
+
606
+ print_something()
607
+ "Printed on process 3"
608
+ ```
609
+ """
610
+ if self.is_last_process or not self.use_distributed:
611
+ return function
612
+ return do_nothing
613
+
614
+ def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
615
+ """
616
+ Decorator that only runs the decorated function on the process with the given index.
617
+
618
+ Args:
619
+ function (`Callable`, `optional`):
620
+ The function to decorate.
621
+ process_index (`int`, `optional`):
622
+ The index of the process on which to run the function.
623
+
624
+ Example:
625
+ ```python
626
+ # Assume we have 4 processes.
627
+ from accelerate.state import PartialState
628
+
629
+ state = PartialState()
630
+
631
+
632
+ @state.on_process(process_index=2)
633
+ def print_something():
634
+ print(f"Printed on process {state.process_index}")
635
+
636
+
637
+ print_something()
638
+ "Printed on process 2"
639
+ ```
640
+ """
641
+ if function is None:
642
+ return partial(self.on_process, process_index=process_index)
643
+ if (self.process_index == process_index) or (not self.use_distributed):
644
+ return function
645
+ return do_nothing
646
+
647
+ def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
648
+ """
649
+ Decorator that only runs the decorated function on the process with the given index on the current node.
650
+
651
+ Args:
652
+ function (`Callable`, *optional*):
653
+ The function to decorate.
654
+ local_process_index (`int`, *optional*):
655
+ The index of the local process on which to run the function.
656
+
657
+ Example:
658
+ ```python
659
+ # Assume we have 2 servers with 4 processes each.
660
+ from accelerate import Accelerator
661
+
662
+ accelerator = Accelerator()
663
+
664
+
665
+ @accelerator.on_local_process(local_process_index=2)
666
+ def print_something():
667
+ print(f"Printed on process {accelerator.local_process_index}")
668
+
669
+
670
+ print_something()
671
+ # On server 1:
672
+ "Printed on process 2"
673
+ # On server 2:
674
+ "Printed on process 2"
675
+ ```
676
+ """
677
+ if function is None:
678
+ return partial(self.on_local_process, local_process_index=local_process_index)
679
+ if (self.local_process_index == local_process_index) or (not self.use_distributed):
680
+ return function
681
+ return do_nothing
682
+
683
+ def print(self, *args, **kwargs):
684
+ if self.is_local_main_process:
685
+ print(*args, **kwargs)
686
+
687
+ @property
688
+ def default_device(self) -> torch.device:
689
+ """
690
+ Returns the default device which is:
691
+ - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
692
+ - CUDA if `torch.cuda.is_available()`
693
+ - MLU if `is_mlu_available()`
694
+ - NPU if `is_npu_available()`
695
+ - CPU otherwise
696
+ """
697
+ if is_mps_available():
698
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
699
+ return torch.device("mps")
700
+ elif is_mlu_available():
701
+ return torch.device("mlu")
702
+ elif torch.cuda.is_available():
703
+ return torch.device("cuda")
704
+ elif is_xpu_available():
705
+ return torch.device("xpu:0")
706
+ elif is_npu_available():
707
+ return torch.device("npu")
708
+ else:
709
+ return torch.device("cpu")
710
+
711
+ def _prepare_backend(
712
+ self, cpu: bool = False, sagemaker_dp=False, backend: str = None
713
+ ) -> tuple[str, DistributedType]:
714
+ "Prepares any imports needed before initializing the distributed backend and sets `self.backend` properly"
715
+ distributed_type = None
716
+ if sagemaker_dp:
717
+ import smdistributed.dataparallel.torch.torch_smddp # noqa
718
+
719
+ backend = "smddp"
720
+ distributed_type = DistributedType.MULTI_GPU
721
+ elif is_torch_xla_available():
722
+ backend = "xla"
723
+ distributed_type = DistributedType.XLA
724
+ elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu:
725
+ if is_mlu_available():
726
+ backend = "cncl"
727
+ distributed_type = DistributedType.MULTI_MLU
728
+ elif torch.cuda.is_available():
729
+ if backend is None:
730
+ backend = "nccl"
731
+ distributed_type = DistributedType.MULTI_GPU
732
+ elif is_npu_available():
733
+ backend = "hccl"
734
+ distributed_type = DistributedType.MULTI_NPU
735
+
736
+ if distributed_type is None and (
737
+ int(os.environ.get("LOCAL_RANK", -1)) != -1
738
+ or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1
739
+ ):
740
+ if not cpu and is_xpu_available():
741
+ distributed_type = DistributedType.MULTI_XPU
742
+ else:
743
+ distributed_type = DistributedType.MULTI_CPU
744
+
745
+ if (
746
+ backend in (None, "ccl")
747
+ and is_ccl_available()
748
+ and (get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU)
749
+ ):
750
+ if get_ccl_version() >= "1.12":
751
+ import oneccl_bindings_for_pytorch # noqa: F401
752
+ else:
753
+ import torch_ccl # noqa: F401
754
+
755
+ backend = "ccl"
756
+ elif backend in (None, "mpi") and torch.distributed.is_mpi_available():
757
+ backend = "mpi"
758
+ else:
759
+ backend = "gloo"
760
+ if distributed_type is None:
761
+ distributed_type = DistributedType.NO
762
+
763
+ return backend, distributed_type
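On a plain single-process run (no `accelerate launch`, no `LOCAL_RANK`/world-size variables set), none of the branches above fire, which is easy to confirm; a small sketch, assuming such an environment:

```python
# Sketch, assuming no distributed launcher environment variables are set.
from accelerate import PartialState

state = PartialState()
print(state.backend)           # None  -> no process group was initialized
print(state.distributed_type)  # DistributedType.NO
print(state.num_processes)     # 1
```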
764
+
765
+ def set_device(self):
766
+ """
767
+ Sets the device in `self.device` to the current distributed environment.
768
+ """
769
+ if self.device is not None:
770
+ return
771
+ if self.distributed_type == DistributedType.NO:
772
+ self.device = torch.device("cpu") if self._cpu else self.default_device
773
+ return
774
+ device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower()
775
+ if device not in ("cpu", "gpu", "mlu", "npu", "xpu", "xla"):
776
+ raise ValueError(
777
+ f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `set_device()` for it!"
778
+ )
779
+ if device == "xla":
780
+ self.device = xm.xla_device()
781
+ else:
782
+ if device == "gpu":
783
+ device = "cuda"
784
+ self.device = torch.device(device, self.local_process_index)
785
+ if self.device is not None:
786
+ if device == "xpu":
787
+ torch.xpu.set_device(self.device)
788
+ elif device == "mlu":
789
+ torch.mlu.set_device(self.device)
790
+ elif device == "npu":
791
+ torch.npu.set_device(self.device)
792
+ elif device == "cuda":
793
+ torch.cuda.set_device(self.device)
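The device string above is derived purely from the `DistributedType` name; a tiny sketch of that string manipulation (plain strings only, no accelerate objects):

```python
# Pure string manipulation mirroring the derivation in `set_device` above.
for dist_type in ("DistributedType.MULTI_GPU", "DistributedType.MULTI_XPU", "DistributedType.XLA"):
    device = dist_type.split(".")[-1].replace("MULTI_", "").lower()
    if device == "gpu":
        device = "cuda"
    print(dist_type, "->", device)
# DistributedType.MULTI_GPU -> cuda
# DistributedType.MULTI_XPU -> xpu
# DistributedType.XLA -> xla
```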
794
+
795
+ def __getattr__(self, name: str):
796
+ # By this point we know that no attributes of `self` contain `name`,
797
+ # so we just modify the error message
798
+ if name in self._known_attrs:
799
+ raise AttributeError(
800
+ f"`PartialState` object has no attribute `{name}`. "
801
+ "This happens if `PartialState._reset_state()` was called and "
802
+ "an `Accelerator` or `PartialState` was not reinitialized."
803
+ )
804
+ # Raise a typical AttributeError
805
+ raise AttributeError(f"'PartialState' object has no attribute '{name}'")
806
+
807
+
808
+ class AcceleratorState:
809
+ """
810
+ Singleton class that has information about the current training environment.
811
+
812
+ **Available attributes:**
813
+
814
+ - **device** (`torch.device`) -- The device to use.
815
+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
816
+ in use.
817
+ - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.
818
+ - **local_process_index** (`int`) -- The index of the current process on the current server.
819
+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
820
+ of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16', or 'fp8').
821
+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.
822
+ - **process_index** (`int`) -- The index of the current process.
823
+ - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
824
+ - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
825
+ - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
826
+ - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
827
+ """
828
+
829
+ _shared_state = SharedDict()
830
+ _known_attrs = PartialState._known_attrs + [
831
+ "deepspeed_plugin",
832
+ "use_ipex",
833
+ "fsdp_plugin",
834
+ "megatron_lm_plugin",
835
+ "dynamo_plugin",
836
+ ]
837
+
838
+ def __init__(
839
+ self,
840
+ mixed_precision: str = None,
841
+ cpu: bool = False,
842
+ dynamo_plugin=None,
843
+ deepspeed_plugin=None,
844
+ fsdp_plugin=None,
845
+ megatron_lm_plugin=None,
846
+ _from_accelerator: bool = False,
847
+ **kwargs,
848
+ ):
849
+ self.__dict__ = self._shared_state
850
+ if parse_flag_from_env("ACCELERATE_USE_CPU"):
851
+ cpu = True
852
+ if PartialState._shared_state == {}:
853
+ PartialState(cpu, **kwargs)
854
+ self.__dict__.update(PartialState._shared_state)
855
+ self._check_initialized(mixed_precision, cpu)
856
+ if not self.initialized:
857
+ self.deepspeed_plugin = None
858
+ self.use_ipex = None
859
+ mixed_precision = (
860
+ parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no")
861
+ if mixed_precision is None
862
+ else mixed_precision.lower()
863
+ )
864
+ if mixed_precision == "fp8":
865
+ if not is_fp8_available():
866
+ raise ValueError(
867
+ "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed."
868
+ )
869
+ elif not check_fp8_capability():
870
+ logger.warning(
871
+ f"The current device has compute capability of {torch.cuda.get_device_capability()} which is "
872
+ "insufficient for FP8 mixed precision training (requires a Hopper/Ada Lovelace GPU "
873
+ "or newer, i.e. compute capability 8.9 or higher). Will use FP16 instead."
874
+ )
875
+ mixed_precision = "fp16"
876
+
877
+ self.dynamo_plugin = dynamo_plugin
878
+ if not _from_accelerator:
879
+ raise ValueError(
880
+ "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` "
881
+ "before using any functionality from the `accelerate` library."
882
+ )
883
+ # deepspeed handles mixed_precision using deepspeed_config
884
+ self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
885
+ if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True):
886
+ if mixed_precision == "bf16":
887
+ if os.environ.get("ACCELERATE_DOWNCAST_BF16"):
888
+ os.environ["XLA_USE_BF16"] = str(0)
889
+ os.environ["XLA_DOWNCAST_BF16"] = str(1)
890
+ self.downcast_bfloat = True
891
+ else:
892
+ os.environ["XLA_USE_BF16"] = str(1)
893
+ os.environ["XLA_DOWNCAST_BF16"] = str(0)
894
+ self.downcast_bfloat = False
895
+ elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
896
+ self.deepspeed_plugin = deepspeed_plugin
897
+ elif self.distributed_type in [
898
+ DistributedType.MULTI_GPU,
899
+ DistributedType.MULTI_MLU,
900
+ DistributedType.MULTI_NPU,
901
+ DistributedType.MULTI_XPU,
902
+ ]:
903
+ if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
904
+ self.distributed_type = DistributedType.FSDP
905
+ if self._mixed_precision != "no":
906
+ fsdp_plugin.set_mixed_precision(self._mixed_precision)
907
+ self.fsdp_plugin = fsdp_plugin
908
+ if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" and self.distributed_type not in [
909
+ DistributedType.MULTI_XPU,
910
+ ]:
911
+ self.distributed_type = DistributedType.MEGATRON_LM
912
+ megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
913
+ self.megatron_lm_plugin = megatron_lm_plugin
914
+ elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
915
+ if is_ipex_available():
916
+ # check if user disables it explicitly
917
+ self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True)
918
+ else:
919
+ self.use_ipex = False
920
+ if (
921
+ self.dynamo_plugin.backend != DynamoBackend.NO
922
+ and self._mixed_precision == "no"
923
+ and self.device.type == "cuda"
924
+ ):
925
+ torch.backends.cuda.matmul.allow_tf32 = True
926
+ PartialState._shared_state["distributed_type"] = self.distributed_type
927
+
928
+ @property
929
+ def initialized(self) -> bool:
930
+ return self._shared_state != PartialState._shared_state
931
+
932
+ def __repr__(self):
933
+ repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
934
+ if self.distributed_type == DistributedType.DEEPSPEED:
935
+ repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
936
+ return repr
937
+
938
+ def _check_initialized(self, mixed_precision=None, cpu=None):
939
+ "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
940
+ if self.initialized:
941
+ err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`."
942
+ if cpu and self.device.type != "cpu":
943
+ raise ValueError(err.format(flag="cpu=True"))
944
+ if (
945
+ mixed_precision is not None
946
+ and mixed_precision != self._mixed_precision
947
+ and self.distributed_type != DistributedType.DEEPSPEED
948
+ ):
949
+ raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
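In practice this guard is what surfaces when a second `Accelerator` asks for settings that conflict with the already-initialized shared state; a minimal sketch, assuming a fresh single-process run:

```python
# Sketch, assuming a fresh process (no prior Accelerator/AcceleratorState).
from accelerate import Accelerator

accelerator = Accelerator()              # mixed_precision defaults to "no"
try:
    Accelerator(mixed_precision="bf16")  # conflicts with the state created above
except ValueError as err:
    print(err)  # "AcceleratorState has already been initialized and cannot be changed, ..."
```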
950
+
951
+ # For backward compatibility
952
+ @property
953
+ def use_fp16(self):
954
+ warnings.warn(
955
+ "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate; use "
956
+ "`AcceleratorState.mixed_precision == 'fp16'` instead.",
957
+ FutureWarning,
958
+ )
959
+ return self._mixed_precision != "no"
960
+
961
+ @property
962
+ def mixed_precision(self):
963
+ if self.distributed_type == DistributedType.DEEPSPEED:
964
+ config = self.deepspeed_plugin.deepspeed_config
965
+ if config.get("fp16", {}).get("enabled", False):
966
+ mixed_precision = "fp16"
967
+ elif config.get("bf16", {}).get("enabled", False):
968
+ mixed_precision = "bf16"
969
+ else:
970
+ mixed_precision = "no"
971
+ else:
972
+ mixed_precision = self._mixed_precision
973
+ return mixed_precision
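For the DeepSpeed branch, the property simply mirrors the plugin's config dict; a hedged sketch of the same lookup order with hand-written config dicts (the real `deepspeed_config` comes from `DeepSpeedPlugin`):

```python
# Standalone re-statement of the lookup order above, using toy config dicts.
def resolve_mixed_precision(deepspeed_config: dict) -> str:
    if deepspeed_config.get("fp16", {}).get("enabled", False):
        return "fp16"
    if deepspeed_config.get("bf16", {}).get("enabled", False):
        return "bf16"
    return "no"


assert resolve_mixed_precision({"fp16": {"enabled": True}}) == "fp16"
assert resolve_mixed_precision({"bf16": {"enabled": True}}) == "bf16"
assert resolve_mixed_precision({}) == "no"
```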
974
+
975
+ @staticmethod
976
+ def _reset_state(reset_partial_state: bool = False):
977
+ "Resets `_shared_state`; used internally and should not be called by users."
978
+ AcceleratorState._shared_state.clear()
979
+ if reset_partial_state:
980
+ PartialState._reset_state()
981
+
982
+ @property
983
+ def use_distributed(self):
984
+ """
985
+ Whether the Accelerator is configured for distributed training
986
+ """
987
+ return PartialState().use_distributed
988
+
989
+ @property
990
+ def is_last_process(self) -> bool:
991
+ "Returns whether the current process is the last one"
992
+ return PartialState().is_last_process
993
+
994
+ @property
995
+ def is_main_process(self) -> bool:
996
+ "Returns whether the current process is the main process"
997
+ return PartialState().is_main_process
998
+
999
+ @property
1000
+ def is_local_main_process(self) -> bool:
1001
+ "Returns whether the current process is the main process on the local node"
1002
+ return PartialState().is_local_main_process
1003
+
1004
+ def wait_for_everyone(self):
1005
+ PartialState().wait_for_everyone()
1006
+
1007
+ @contextmanager
1008
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
1009
+ """
1010
+ Splits `inputs` between `self.num_processes` quickly, so each portion can then be used on that process. Useful when doing
1011
+ distributed inference, such as with different prompts.
1012
+
1013
+ Note that when using a `dict`, all keys need to have the same number of elements.
1014
+
1015
+ Args:
1016
+ inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
1017
+ The input to split between processes.
1018
+ apply_padding (`bool`, `optional`, defaults to `False`):
1019
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
1020
+ number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
1021
+ in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
1022
+
1023
+
1024
+ Example:
1025
+
1026
+ ```python
1027
+ # Assume there are two processes
1028
+ from accelerate.state import AcceleratorState
1029
+
1030
+ state = AcceleratorState()
1031
+ with state.split_between_processes(["A", "B", "C"]) as inputs:
1032
+ print(inputs)
1033
+ # Process 0
1034
+ ["A", "B"]
1035
+ # Process 1
1036
+ ["C"]
1037
+
1038
+ with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
1039
+ print(inputs)
1040
+ # Process 0
1041
+ ["A", "B"]
1042
+ # Process 1
1043
+ ["C", "C"]
1044
+ ```
1045
+ """
1046
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
1047
+ yield inputs
1048
+
1049
+ @contextmanager
1050
+ def main_process_first(self):
1051
+ """
1052
+ Lets the main process go first inside a with block.
1053
+
1054
+ The other processes will enter the with block after the main process exits.
1055
+ """
1056
+ with PartialState().main_process_first():
1057
+ yield
1058
+
1059
+ @contextmanager
1060
+ def local_main_process_first(self):
1061
+ """
1062
+ Lets the local main process go first inside a with block.
1063
+
1064
+ The other processes will enter the with block after the main process exits.
1065
+ """
1066
+ with PartialState().local_main_process_first():
1067
+ yield
1068
+
1069
+ def print(self, *args, **kwargs):
1070
+ PartialState().print(*args, **kwargs)
1071
+
1072
+ def __getattr__(self, name: str):
1073
+ # By this point we know that no attributes of `self` contain `name`,
1074
+ # so we just modify the error message
1075
+ if name in self._known_attrs:
1076
+ raise AttributeError(
1077
+ f"`AcceleratorState` object has no attribute `{name}`. "
1078
+ "This happens if `AcceleratorState._reset_state()` was called and "
1079
+ "an `Accelerator` or `PartialState` was not reinitialized."
1080
+ )
1081
+ # Raise a typical AttributeError
1082
+ raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'")
1083
+
1084
+
1085
+ class GradientState:
1086
+ """
1087
+ Singleton class that has information related to gradient synchronization for gradient accumulation
1088
+
1089
+ **Available attributes:**
1090
+
1091
+ - **end_of_dataloader** (`bool`) -- Whether we have reached the end of the current dataloader
1092
+ - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
1093
+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
1094
+ - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over
1095
+ - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are
1096
+ being iterated over
1097
+ - **num_steps** (`int`) -- The number of steps to accumulate over
1098
+ - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient
1099
+ accumulation
1100
+ - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader
1101
+ iteration and the number of total steps reset
1102
+ - **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized
1103
+ as false. Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently,
1104
+ after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence
1105
+ is_xla_gradients_synced is always true.
1106
+ """
1107
+
1108
+ _shared_state = SharedDict()
1109
+
1110
+ def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
1111
+ self.__dict__ = self._shared_state
1112
+ if not self.initialized:
1113
+ self.sync_gradients = True
1114
+ self.active_dataloader = None
1115
+ self.dataloader_references = [None]
1116
+ self.plugin_kwargs = (
1117
+ gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
1118
+ )
1119
+ self._is_xla_gradients_synced = False
1120
+
1121
+ # Plugin args are different and can be updated
1122
+ if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
1123
+ self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
1124
+
1125
+ @property
1126
+ def num_steps(self) -> int:
1127
+ "Returns the number of steps to accumulate over"
1128
+ return self.plugin_kwargs.get("num_steps", 1)
1129
+
1130
+ @property
1131
+ def adjust_scheduler(self) -> bool:
1132
+ "Returns whether the scheduler should be adjusted"
1133
+ return self.plugin_kwargs.get("adjust_scheduler", False)
1134
+
1135
+ @property
1136
+ def sync_with_dataloader(self) -> bool:
1137
+ "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
1138
+ return self.plugin_kwargs.get("sync_with_dataloader", True)
1139
+
1140
+ @property
1141
+ def initialized(self) -> bool:
1142
+ "Returns whether the `GradientState` has been initialized"
1143
+ return GradientState._shared_state != {}
1144
+
1145
+ @property
1146
+ def end_of_dataloader(self) -> bool:
1147
+ "Returns whether we have reached the end of the current dataloader"
1148
+ if not self.in_dataloader:
1149
+ return False
1150
+ return self.active_dataloader.end_of_dataloader
1151
+
1152
+ @property
1153
+ def remainder(self) -> int:
1154
+ "Returns the number of extra samples that were added from padding the dataloader"
1155
+ if not self.in_dataloader:
1156
+ return -1
1157
+ return self.active_dataloader.remainder
1158
+
1159
+ def __repr__(self):
1160
+ return (
1161
+ f"Sync Gradients: {self.sync_gradients}\n"
1162
+ f"At end of current dataloader: {self.end_of_dataloader}\n"
1163
+ f"Extra samples added: {self.remainder}\n"
1164
+ f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
1165
+ )
1166
+
1167
+ @property
1168
+ def is_xla_gradients_synced(self):
1169
+ "Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true."
1170
+ if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False):
1171
+ return True
1172
+ return self._is_xla_gradients_synced
1173
+
1174
+ @is_xla_gradients_synced.setter
1175
+ def is_xla_gradients_synced(self, is_synced):
1176
+ "Set the _is_xla_gradients_synced attribute."
1177
+ self._is_xla_gradients_synced = is_synced
1178
+
1179
+ def _set_sync_gradients(self, sync_gradients):
1180
+ "Private function that sets whether gradients should be synchronized. Users should not have to call this."
1181
+ self.sync_gradients = sync_gradients
1182
+ # Allow grad-sync to automatically work on TPUs
1183
+ if (
1184
+ self.sync_gradients
1185
+ and is_torch_xla_available(check_is_tpu=True)
1186
+ and PartialState().distributed_type == DistributedType.XLA
1187
+ ):
1188
+ xm.mark_step()
1189
+
1190
+ def _add_dataloader(self, dataloader):
1191
+ "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
1192
+ self.active_dataloader = dataloader
1193
+ self.dataloader_references.append(self.active_dataloader)
1194
+
1195
+ def _remove_dataloader(self, dataloader):
1196
+ "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
1197
+ self.dataloader_references.remove(dataloader)
1198
+ self.active_dataloader = self.dataloader_references[-1]
1199
+
1200
+ @property
1201
+ def in_dataloader(self) -> bool:
1202
+ "Returns whether the current process is in a dataloader"
1203
+ return self.active_dataloader is not None
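The `_add_dataloader`/`_remove_dataloader` bookkeeping above behaves like a small stack, which is what makes nested (e.g. train/eval) dataloaders work; a hedged sketch using plain sentinel objects in place of real prepared dataloaders (these helpers are internal, so this is for illustration only):

```python
# Sketch only: the _add/_remove helpers are internal to accelerate.
from accelerate.state import GradientState

state = GradientState()
outer, inner = object(), object()       # stand-ins for prepared dataloaders

state._add_dataloader(outer)
state._add_dataloader(inner)            # nested iteration: inner becomes active
assert state.in_dataloader and state.active_dataloader is inner

state._remove_dataloader(inner)         # popping restores the enclosing dataloader
assert state.active_dataloader is outer

state._remove_dataloader(outer)
assert state.active_dataloader is None and not state.in_dataloader
```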
1204
+
1205
+ @staticmethod
1206
+ def _reset_state():
1207
+ "Resets `_shared_state`; used internally and should not be called by users."
1208
+ GradientState._shared_state.clear()