applied-ai-018 committed
Commit 7d701df · verified · 1 Parent(s): 8fb23b1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step40/zero/24.input_layernorm.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  8. ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  9. ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/fp32.pt +3 -0
  10. venv/lib/python3.10/site-packages/accelerate/__init__.py +48 -0
  11. venv/lib/python3.10/site-packages/accelerate/accelerator.py +0 -0
  12. venv/lib/python3.10/site-packages/accelerate/big_modeling.py +627 -0
  13. venv/lib/python3.10/site-packages/accelerate/checkpointing.py +275 -0
  14. venv/lib/python3.10/site-packages/accelerate/data_loader.py +1093 -0
  15. venv/lib/python3.10/site-packages/accelerate/hooks.py +709 -0
  16. venv/lib/python3.10/site-packages/accelerate/inference.py +188 -0
  17. venv/lib/python3.10/site-packages/accelerate/launchers.py +258 -0
  18. venv/lib/python3.10/site-packages/accelerate/local_sgd.py +102 -0
  19. venv/lib/python3.10/site-packages/accelerate/logging.py +123 -0
  20. venv/lib/python3.10/site-packages/accelerate/memory_utils.py +22 -0
  21. venv/lib/python3.10/site-packages/accelerate/optimizer.py +193 -0
  22. venv/lib/python3.10/site-packages/accelerate/scheduler.py +98 -0
  23. venv/lib/python3.10/site-packages/accelerate/state.py +1209 -0
  24. venv/lib/python3.10/site-packages/accelerate/tracking.py +1023 -0
  25. venv/lib/python3.10/site-packages/accelerate/utils/bnb.py +467 -0
  26. venv/lib/python3.10/site-packages/accelerate/utils/dataclasses.py +1717 -0
  27. venv/lib/python3.10/site-packages/accelerate/utils/environment.py +274 -0
  28. venv/lib/python3.10/site-packages/accelerate/utils/memory.py +158 -0
  29. venv/lib/python3.10/site-packages/accelerate/utils/offload.py +213 -0
  30. venv/lib/python3.10/site-packages/accelerate/utils/operations.py +851 -0
  31. venv/lib/python3.10/site-packages/accelerate/utils/other.py +366 -0
  32. venv/lib/python3.10/site-packages/accelerate/utils/random.py +122 -0
  33. venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py +51 -0
  34. venv/lib/python3.10/site-packages/accelerate/utils/versions.py +56 -0
  35. venv/lib/python3.10/site-packages/more_itertools/__init__.py +6 -0
  36. venv/lib/python3.10/site-packages/more_itertools/__init__.pyi +2 -0
  37. venv/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/more_itertools/more.py +0 -0
  41. venv/lib/python3.10/site-packages/more_itertools/more.pyi +695 -0
  42. venv/lib/python3.10/site-packages/more_itertools/py.typed +0 -0
  43. venv/lib/python3.10/site-packages/more_itertools/recipes.py +1012 -0
  44. venv/lib/python3.10/site-packages/more_itertools/recipes.pyi +128 -0
  45. venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER +1 -0
  46. venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/License.txt +1568 -0
  47. venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/METADATA +35 -0
  48. venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/RECORD +106 -0
  49. venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL +5 -0
  50. venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt +1 -0
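The commit message above says these files were added with the upload-large-folder tool. As a hedged sketch only: in huggingface_hub this workflow is exposed as HfApi.upload_large_folder, roughly as below (the repo id and folder path are illustrative placeholders, not taken from this commit).

# Hypothetical sketch of re-creating this kind of commit with huggingface_hub.
# The repo_id and folder_path below are illustrative, not the real ones.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="your-username/your-repo",  # placeholder
    folder_path="ckpts",                # local folder to mirror into the repo
    repo_type="model",
)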
ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edbacfbc93504fa7e7eb8b523eff34505a29340fab99683b37bf3f889aafa6d5
+ size 9387
ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67fad17970d044aa639f8e857152bbe14562fbd3507b8c88716266925c0a12f0
+ size 9372
ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc68041cccb216325c93aae78bc6679c7030ee3664d2e32f0c8c6391a3c04eff
+ size 9387
ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28e584416eb56d9c15050bd85584fcf2a7bc973a61f8e06c0f79c8e5355fccc2
+ size 9372
ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7911a1ed49b9d3ac5d5b893e0f5d625f50a577e1bd2f6369accbac09e199014f
+ size 9387
ckpts/universal/global_step40/zero/24.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2eca0475f0c33ca316333e25aed82a0fd9411039108a6630a0c789539f2fc36a
+ size 9293
ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0dc0e99d763151d6e6c35ac66b4f6becc058242ba2d0b2740993cb004ae7fc3
+ size 33555612
ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ccde16d632556998808de4eec1b7bfd6cadd91b3d420548b42a0e807736f59d
+ size 33555627
ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe041cea5c5538c8e92e49f1b1fb2b40d287bf000e69881b4342dae9ce870f2b
+ size 9293
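Each checkpoint file above is stored as a Git LFS pointer that records only the object's sha256 (oid) and byte size. A minimal sketch of verifying a downloaded blob against its pointer; the local filename is illustrative, the oid and size are taken from the first pointer above.

# Minimal sketch: check a downloaded LFS object against the oid/size in its pointer.
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

print(verify_lfs_object(
    "exp_avg_sq.pt",  # illustrative local filename
    "edbacfbc93504fa7e7eb8b523eff34505a29340fab99683b37bf3f889aafa6d5",
    9387,
))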
venv/lib/python3.10/site-packages/accelerate/__init__.py ADDED
@@ -0,0 +1,48 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ __version__ = "0.29.3"
+
+ from .accelerator import Accelerator
+ from .big_modeling import (
+ cpu_offload,
+ cpu_offload_with_hook,
+ disk_offload,
+ dispatch_model,
+ init_empty_weights,
+ init_on_device,
+ load_checkpoint_and_dispatch,
+ )
+ from .data_loader import skip_first_batches
+ from .inference import prepare_pippy
+ from .launchers import debug_launcher, notebook_launcher
+ from .state import PartialState
+ from .utils import (
+ AutocastKwargs,
+ DataLoaderConfiguration,
+ DeepSpeedPlugin,
+ DistributedDataParallelKwargs,
+ DistributedType,
+ FullyShardedDataParallelPlugin,
+ GradScalerKwargs,
+ InitProcessGroupKwargs,
+ find_executable_batch_size,
+ infer_auto_device_map,
+ is_rich_available,
+ load_checkpoint_in_model,
+ synchronize_rng_states,
+ )
+
+
+ if is_rich_available():
+ from .utils import rich
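The file above defines the public API of accelerate 0.29.3. As a hedged, minimal usage sketch of the central Accelerator.prepare workflow (the model, optimizer and dataloader are illustrative stand-ins, not part of this commit):

# Minimal sketch of the core API exported above.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataset = torch.utils.data.TensorDataset(torch.randn(32, 10), torch.randint(0, 2, (32,)))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)

# prepare() wraps the objects for the current device / distributed setup.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for inputs, targets in dataloader:
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(inputs), targets)
    accelerator.backward(loss)  # used instead of loss.backward() so mixed precision / DDP work
    optimizer.step()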
venv/lib/python3.10/site-packages/accelerate/accelerator.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/accelerate/big_modeling.py ADDED
@@ -0,0 +1,627 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ import os
+ from contextlib import contextmanager
+ from functools import wraps
+ from typing import Dict, List, Optional, Union
+
+ import torch
+ import torch.nn as nn
+
+ from .hooks import (
+ AlignDevicesHook,
+ CpuOffload,
+ UserCpuOffloadHook,
+ add_hook_to_module,
+ attach_align_device_hook,
+ attach_align_device_hook_on_blocks,
+ )
+ from .utils import (
+ OffloadedWeightsLoader,
+ check_cuda_p2p_ib_support,
+ check_device_map,
+ extract_submodules_state_dict,
+ find_tied_parameters,
+ get_balanced_memory,
+ infer_auto_device_map,
+ is_mlu_available,
+ is_npu_available,
+ is_torch_version,
+ is_xpu_available,
+ load_checkpoint_in_model,
+ offload_state_dict,
+ parse_flag_from_env,
+ retie_parameters,
+ )
+ from .utils.other import recursive_getattr
+
+
+ logger = logging.getLogger(__name__)
+
+
+ @contextmanager
+ def init_empty_weights(include_buffers: bool = None):
+ """
+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an
+ empty model. Useful when just initializing the model would blow the available RAM.
+
+ Args:
+ include_buffers (`bool`, *optional*):
+ Whether or not to also put all buffers on the meta device while initializing.
+
+ Example:
+
+ ```python
+ import torch.nn as nn
+ from accelerate import init_empty_weights
+
+ # Initialize a model with 100 billion parameters in no time and without using any RAM.
+ with init_empty_weights():
+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
+ ```
+
+ <Tip warning={true}>
+
+ Any model created under this context manager has no weights. As such you can't do something like
+ `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
+ Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
+ called.
+
+ </Tip>
+ """
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+ with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
+ yield f
+
+
+ @contextmanager
+ def init_on_device(device: torch.device, include_buffers: bool = None):
+ """
+ A context manager under which models are initialized with all parameters on the specified device.
+
+ Args:
+ device (`torch.device`):
+ Device to initialize all parameters on.
+ include_buffers (`bool`, *optional*):
+ Whether or not to also put all buffers on the meta device while initializing.
+
+ Example:
+
+ ```python
+ import torch.nn as nn
+ from accelerate import init_on_device
+
+ with init_on_device(device=torch.device("cuda")):
+ tst = nn.Linear(100, 100)  # on `cuda` device
+ ```
+ """
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+
+ # TODO(shingjan): remove the torch version check once older versions are deprecated
+ if is_torch_version(">=", "2.0") and include_buffers:
+ with device:
+ yield
+ return
+
+ old_register_parameter = nn.Module.register_parameter
+ if include_buffers:
+ old_register_buffer = nn.Module.register_buffer
+
+ def register_empty_parameter(module, name, param):
+ old_register_parameter(module, name, param)
+ if param is not None:
+ param_cls = type(module._parameters[name])
+ kwargs = module._parameters[name].__dict__
+ kwargs["requires_grad"] = param.requires_grad
+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+ def register_empty_buffer(module, name, buffer, persistent=True):
+ old_register_buffer(module, name, buffer, persistent=persistent)
+ if buffer is not None:
+ module._buffers[name] = module._buffers[name].to(device)
+
+ # Patch tensor creation
+ if include_buffers:
+ tensor_constructors_to_patch = {
+ torch_function_name: getattr(torch, torch_function_name)
+ for torch_function_name in ["empty", "zeros", "ones", "full"]
+ }
+ else:
+ tensor_constructors_to_patch = {}
+
+ def patch_tensor_constructor(fn):
+ def wrapper(*args, **kwargs):
+ kwargs["device"] = device
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ try:
+ nn.Module.register_parameter = register_empty_parameter
+ if include_buffers:
+ nn.Module.register_buffer = register_empty_buffer
+ for torch_function_name in tensor_constructors_to_patch.keys():
+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+ yield
+ finally:
+ nn.Module.register_parameter = old_register_parameter
+ if include_buffers:
+ nn.Module.register_buffer = old_register_buffer
+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+ setattr(torch, torch_function_name, old_torch_function)
+
+
+ def cpu_offload(
+ model: nn.Module,
+ execution_device: Optional[torch.device] = None,
+ offload_buffers: bool = False,
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ ):
+ """
+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
+ state dict and put on the execution device passed as they are needed, then offloaded again.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to offload.
+ execution_device (`torch.device`, *optional*):
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+ model's first parameter device.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
+ The state dict of the model that will be kept on CPU.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ """
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ if state_dict is None:
+ state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
+
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(
+ model,
+ execution_device=execution_device,
+ offload=True,
+ offload_buffers=offload_buffers,
+ weights_map=state_dict,
+ preload_module_classes=preload_module_classes,
+ )
+
+ return model
+
+
+ def cpu_offload_with_hook(
+ model: torch.nn.Module,
+ execution_device: Optional[Union[int, str, torch.device]] = None,
+ prev_module_hook: Optional[UserCpuOffloadHook] = None,
+ ):
+ """
+ Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
+ [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
+ the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to offload.
+ execution_device(`str`, `int` or `torch.device`, *optional*):
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
+ GPU 0 if there is a GPU, and finally to the CPU.
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
+ The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
+ offload method will be called just before the forward of the model to which this hook is attached.
+
+ Example:
+
+ ```py
+ model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
+ model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
+ model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
+
+ hid_1 = model_1(input)
+ for i in range(50):
+ # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
+ hid_2 = model_2(hid_1)
+ # model2 is offloaded to the CPU just before this forward.
+ hid_3 = model_3(hid_2)
+
+ # For model3, you need to manually call the hook offload method.
+ hook_3.offload()
+ ```
+ """
+ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
+ add_hook_to_module(model, hook, append=True)
+ user_hook = UserCpuOffloadHook(model, hook)
+ return model, user_hook
+
+
+ def disk_offload(
+ model: nn.Module,
+ offload_dir: Union[str, os.PathLike],
+ execution_device: Optional[torch.device] = None,
+ offload_buffers: bool = False,
+ preload_module_classes: Optional[List[str]] = None,
+ ):
+ """
+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
+ put on the execution device passed as they are needed, then offloaded again.
+
+ Args:
+ model (`torch.nn.Module`): The model to offload.
+ offload_dir (`str` or `os.PathLike`):
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
+ execution_device (`torch.device`, *optional*):
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+ model's first parameter device.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ """
+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
+ offload_state_dict(offload_dir, model.state_dict())
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
+
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(
+ model,
+ execution_device=execution_device,
+ offload=True,
+ offload_buffers=offload_buffers,
+ weights_map=weights_map,
+ preload_module_classes=preload_module_classes,
+ )
+
+ return model
+
+
+ def dispatch_model(
+ model: nn.Module,
+ device_map: Dict[str, Union[str, int, torch.device]],
+ main_device: Optional[torch.device] = None,
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
+ offload_dir: Optional[Union[str, os.PathLike]] = None,
+ offload_index: Optional[Dict[str, str]] = None,
+ offload_buffers: bool = False,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ force_hooks: bool = False,
+ ):
+ """
+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
+ the CPU or even the disk.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to dispatch.
+ device_map (`Dict[str, Union[str, int, torch.device]]`):
+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that
+ `"disk"` is accepted even if it's not a proper value for `torch.device`.
+ main_device (`str`, `int` or `torch.device`, *optional*):
+ The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
+ `"disk"`.
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
+ The state dict of the part of the model that will be kept on CPU.
+ offload_dir (`str` or `os.PathLike`):
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
+ offload_index (`Dict`, *optional*):
+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
+ to the index saved in `save_folder`.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ force_hooks (`bool`, *optional*, defaults to `False`):
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+ single device.
+ """
+ # Error early if the device map is incomplete.
+ check_device_map(model, device_map)
+
+ # for backward compatibility
+ is_bnb_quantized = (
+ getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
+ ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
+
+ # We attach hooks if the device_map has at least 2 different devices or if
+ # force_hooks is set to `True`. Otherwise, the model is already loaded
+ # in the unique device and the user can decide where to dispatch the model.
+ # If the model is quantized, we always force-dispatch the model
+ if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
+ if main_device is None:
+ if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
+ main_device = "cpu"
+ else:
+ main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
+
+ if main_device != "cpu":
+ cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
+ if state_dict is None and len(cpu_modules) > 0:
+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
+
+ disk_modules = [name for name, device in device_map.items() if device == "disk"]
+ if offload_dir is None and offload_index is None and len(disk_modules) > 0:
+ raise ValueError(
+ "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
+ f"need to be offloaded: {', '.join(disk_modules)}."
+ )
+ if (
+ len(disk_modules) > 0
+ and offload_index is None
+ and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
+ ):
+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
+ offload_state_dict(offload_dir, disk_state_dict)
+
+ execution_device = {
+ name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
+ }
+ execution_device[""] = main_device
+ offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
+ offload = {name: device in offloaded_devices for name, device in device_map.items()}
+ save_folder = offload_dir if len(disk_modules) > 0 else None
+ if state_dict is not None or save_folder is not None or offload_index is not None:
+ device = main_device if offload_index is not None else None
+ weights_map = OffloadedWeightsLoader(
+ state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
+ )
+ else:
+ weights_map = None
+
+ # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
+ # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
+ # original pointer) on each devices.
+ tied_params = find_tied_parameters(model)
+
+ tied_params_map = {}
+ for group in tied_params:
+ for param_name in group:
+ # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
+ # to care about views of tensors through storage_offset.
+ data_ptr = recursive_getattr(model, param_name).data_ptr()
+ tied_params_map[data_ptr] = {}
+
+ # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
+
+ attach_align_device_hook_on_blocks(
+ model,
+ execution_device=execution_device,
+ offload=offload,
+ offload_buffers=offload_buffers,
+ weights_map=weights_map,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ tied_params_map=tied_params_map,
+ )
+
+ # warn if there is any params on the meta device
+ offloaded_devices_str = " and ".join(
+ [device for device in set(device_map.values()) if device in ("cpu", "disk")]
+ )
+ if len(offloaded_devices_str) > 0:
+ logging.warning(
+ f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
+ )
+
+ # Attaching the hook may break tied weights, so we retie them
+ retie_parameters(model, tied_params)
+
+ # add warning to cuda and to method
+ def add_warning(fn, model):
+ @wraps(fn)
+ def wrapper(*args, **kwargs):
+ warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
+ if str(fn.__name__) == "to":
+ to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
+ if to_device is not None:
+ logger.warning(warning_msg)
+ else:
+ logger.warning(warning_msg)
+ for param in model.parameters():
+ if param.device == torch.device("meta"):
+ raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ model.to = add_warning(model.to, model)
+ if is_npu_available():
+ model.npu = add_warning(model.npu, model)
+ elif is_mlu_available():
+ model.mlu = add_warning(model.mlu, model)
+ elif is_xpu_available():
+ model.xpu = add_warning(model.xpu, model)
+ else:
+ model.cuda = add_warning(model.cuda, model)
+
+ # Check if we are using multi-gpus with RTX 4000 series
+ use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
+ if use_multi_gpu and not check_cuda_p2p_ib_support():
+ logger.warning(
+ "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
+ "This can affect the multi-gpu inference when using accelerate device_map. "
+ "Please make sure to update your driver to the latest version which resolves this."
+ )
+ else:
+ device = list(device_map.values())[0]
+ # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
+ if is_npu_available() and isinstance(device, int):
+ device = f"npu:{device}"
+ elif is_mlu_available() and isinstance(device, int):
+ device = f"mlu:{device}"
+ elif is_xpu_available() and isinstance(device, int):
+ device = f"xpu:{device}"
+ if device != "disk":
+ model.to(device)
+ else:
+ raise ValueError(
+ "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
+ )
+ # Convert OrderedDict back to dict for easier usage
+ model.hf_device_map = dict(device_map)
+ return model
+
+
+ def load_checkpoint_and_dispatch(
+ model: nn.Module,
+ checkpoint: Union[str, os.PathLike],
+ device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+ no_split_module_classes: Optional[List[str]] = None,
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
+ offload_buffers: bool = False,
+ dtype: Optional[Union[str, torch.dtype]] = None,
+ offload_state_dict: Optional[bool] = None,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ force_hooks: bool = False,
+ strict: bool = False,
+ ):
+ """
+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
+ loaded and adds the various hooks that will make this model run properly (even if split across devices).
+
+ Args:
+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.
+ checkpoint (`str` or `os.PathLike`):
+ The folder checkpoint to load. It can be:
+ - a path to a file containing a whole model state dict
+ - a path to a `.json` file containing the index to a sharded checkpoint
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+ name, once a given module name is inside, every submodule of it will be sent to the same device.
+
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
+ information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map).
+ Defaults to None, which means [`dispatch_model`] will not be called.
+ max_memory (`Dict`, *optional*):
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU
+ and the available CPU RAM if unset.
+ no_split_module_classes (`List[str]`, *optional*):
+ A list of layer class names that should never be split across device (for instance any layer that has a
+ residual connection).
+ offload_folder (`str` or `os.PathLike`, *optional*):
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
+ well as the parameters.
+ dtype (`str` or `torch.dtype`, *optional*):
+ If provided, the weights will be converted to that type when loaded.
+ offload_state_dict (`bool`, *optional*):
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
+ the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map
+ picked contains `"disk"` values.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ force_hooks (`bool`, *optional*, defaults to `False`):
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+ single device.
+ strict (`bool`, *optional*, defaults to `False`):
+ Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
+ state_dict.
+
+ Example:
+
+ ```python
+ >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
+ >>> from huggingface_hub import hf_hub_download
+ >>> from transformers import AutoConfig, AutoModelForCausalLM
+
+ >>> # Download the Weights
+ >>> checkpoint = "EleutherAI/gpt-j-6B"
+ >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
+
+ >>> # Create a model and initialize it with empty weights
+ >>> config = AutoConfig.from_pretrained(checkpoint)
+ >>> with init_empty_weights():
+ ...     model = AutoModelForCausalLM.from_config(config)
+
+ >>> # Load the checkpoint and dispatch it to the right devices
+ >>> model = load_checkpoint_and_dispatch(
+ ...     model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
+ ... )
+ ```
+ """
+ if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
+ raise ValueError(
+ "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
+ "'sequential'."
+ )
+ if isinstance(device_map, str):
+ if device_map != "sequential":
+ max_memory = get_balanced_memory(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ dtype=dtype,
+ low_zero=(device_map == "balanced_low_0"),
+ )
+ device_map = infer_auto_device_map(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ dtype=dtype,
+ offload_buffers=offload_buffers,
+ )
+ if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
+ offload_state_dict = True
+ load_checkpoint_in_model(
+ model,
+ checkpoint,
+ device_map=device_map,
+ offload_folder=offload_folder,
+ dtype=dtype,
+ offload_state_dict=offload_state_dict,
+ offload_buffers=offload_buffers,
+ strict=strict,
+ )
+ if device_map is None:
+ return model
+ return dispatch_model(
+ model,
+ device_map=device_map,
+ offload_dir=offload_folder,
+ offload_buffers=offload_buffers,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ force_hooks=force_hooks,
+ )
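Unlike load_checkpoint_and_dispatch, dispatch_model above has no usage example in its docstring. A hedged minimal sketch with a hand-written device_map follows; the submodule names ("encoder", "decoder", "lm_head") and devices are illustrative stand-ins, not taken from this commit, and a CUDA device is assumed to exist.

# Hedged sketch of dispatch_model with an explicit device_map (module names illustrative).
import torch.nn as nn
from accelerate import dispatch_model

model = nn.ModuleDict({
    "encoder": nn.Linear(16, 16),
    "decoder": nn.Linear(16, 16),
    "lm_head": nn.Linear(16, 4),
})

# Every top-level submodule gets a target; "cpu" (and "disk") values trigger offloading hooks.
device_map = {"encoder": 0, "decoder": "cpu", "lm_head": 0}
model = dispatch_model(model, device_map=device_map)
print(model.hf_device_map)  # the map is stored back on the model, as in the code above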
venv/lib/python3.10/site-packages/accelerate/checkpointing.py ADDED
@@ -0,0 +1,275 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import random
+ from pathlib import Path
+ from typing import List
+
+ import numpy as np
+ import torch
+ from safetensors.torch import load_file
+ from torch.cuda.amp import GradScaler
+
+ from .utils import (
+ MODEL_NAME,
+ OPTIMIZER_NAME,
+ RNG_STATE_NAME,
+ SAFE_MODEL_NAME,
+ SAFE_WEIGHTS_NAME,
+ SAMPLER_NAME,
+ SCALER_NAME,
+ SCHEDULER_NAME,
+ WEIGHTS_NAME,
+ get_pretty_name,
+ is_torch_xla_available,
+ is_xpu_available,
+ save,
+ )
+
+
+ if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+ from .logging import get_logger
+ from .state import PartialState
+
+
+ logger = get_logger(__name__)
+
+
+ def save_accelerator_state(
+ output_dir: str,
+ model_states: List[dict],
+ optimizers: list,
+ schedulers: list,
+ dataloaders: list,
+ process_index: int,
+ scaler: GradScaler = None,
+ save_on_each_node: bool = False,
+ safe_serialization: bool = True,
+ ):
+ """
+ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
+
+ <Tip>
+
+ If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
+ `pickle`.
+
+ </Tip>
+
+ Args:
+ output_dir (`str` or `os.PathLike`):
+ The name of the folder to save all relevant weights and states.
+ model_states (`List[torch.nn.Module]`):
+ A list of model states
+ optimizers (`List[torch.optim.Optimizer]`):
+ A list of optimizer instances
+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
+ A list of learning rate schedulers
+ dataloaders (`List[torch.utils.data.DataLoader]`):
+ A list of dataloader instances to save their sampler states
+ process_index (`int`):
+ The current process index in the Accelerator state
+ scaler (`torch.cuda.amp.GradScaler`, *optional*):
+ An optional gradient scaler instance to save
+ save_on_each_node (`bool`, *optional*):
+ Whether to save on every node, or only the main node.
+ safe_serialization (`bool`, *optional*, defaults to `True`):
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ """
+ output_dir = Path(output_dir)
+ # Model states
+ for i, state in enumerate(model_states):
+ weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
+ if i > 0:
+ weights_name = weights_name.replace(".", f"_{i}.")
+ output_model_file = output_dir.joinpath(weights_name)
+ save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
+ logger.info(f"Model weights saved in {output_model_file}")
+ # Optimizer states
+ for i, opt in enumerate(optimizers):
+ state = opt.state_dict()
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+ output_optimizer_file = output_dir.joinpath(optimizer_name)
+ save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f"Optimizer state saved in {output_optimizer_file}")
+ # Scheduler states
+ for i, scheduler in enumerate(schedulers):
+ state = scheduler.state_dict()
+ scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
+ output_scheduler_file = output_dir.joinpath(scheduler_name)
+ save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f"Scheduler state saved in {output_scheduler_file}")
+ # DataLoader states
+ for i, dataloader in enumerate(dataloaders):
+ sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
+ output_sampler_file = output_dir.joinpath(sampler_name)
+ # Only save if we have our custom sampler
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
+
+ if isinstance(dataloader.dataset, IterableDatasetShard):
+ sampler = dataloader.sampler.sampler
+
+ if isinstance(sampler, SeedableRandomSampler):
+ save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
+
+ # GradScaler state
+ if scaler is not None:
+ state = scaler.state_dict()
+ output_scaler_file = output_dir.joinpath(SCALER_NAME)
+ torch.save(state, output_scaler_file)
+ logger.info(f"Gradient scaler state saved in {output_scaler_file}")
+ # Random number generator states
+ states = {}
+ states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
+ states["random_state"] = random.getstate()
+ states["numpy_random_seed"] = np.random.get_state()
+ states["torch_manual_seed"] = torch.get_rng_state()
+ if is_xpu_available():
+ states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
+ else:
+ states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
+ if is_torch_xla_available():
+ states["xm_seed"] = xm.get_rng_state()
+ output_states_file = output_dir.joinpath(states_name)
+ torch.save(states, output_states_file)
+ logger.info(f"Random states saved in {output_states_file}")
+ return output_dir
+
+
+ def load_accelerator_state(
+ input_dir,
+ models,
+ optimizers,
+ schedulers,
+ dataloaders,
+ process_index,
+ scaler=None,
+ map_location=None,
+ **load_model_func_kwargs,
+ ):
+ """
+ Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
+
+ Args:
+ input_dir (`str` or `os.PathLike`):
+ The name of the folder to load all relevant weights and states.
+ models (`List[torch.nn.Module]`):
+ A list of model instances
+ optimizers (`List[torch.optim.Optimizer]`):
+ A list of optimizer instances
+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
+ A list of learning rate schedulers
+ process_index (`int`):
+ The current process index in the Accelerator state
+ scaler (`torch.cuda.amp.GradScaler`, *optional*):
+ An optional *GradScaler* instance to load
+ map_location (`str`, *optional*):
+ What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
+ load_model_func_kwargs (`dict`, *optional*):
+ Additional arguments that can be passed to the model's `load_state_dict` method.
+ """
+ if map_location not in [None, "cpu", "on_device"]:
+ raise TypeError(
+ "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
+ )
+ if map_location is None:
+ map_location = "cpu"
+ elif map_location == "on_device":
+ map_location = PartialState().device
+
+ input_dir = Path(input_dir)
+ # Model states
+ for i, model in enumerate(models):
+ ending = f"_{i}" if i > 0 else ""
+ input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
+ if input_model_file.exists():
+ state_dict = load_file(input_model_file, device=str(map_location))
+ else:
+ # Load with torch
+ input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
+ state_dict = torch.load(input_model_file, map_location=map_location)
+ models[i].load_state_dict(state_dict, **load_model_func_kwargs)
+ logger.info("All model weights loaded successfully")
+
+ # Optimizer states
+ for i, opt in enumerate(optimizers):
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+ input_optimizer_file = input_dir.joinpath(optimizer_name)
+ optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
+ optimizers[i].load_state_dict(optimizer_state)
+ logger.info("All optimizer states loaded successfully")
+
+ # Scheduler states
+ for i, scheduler in enumerate(schedulers):
+ scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
+ input_scheduler_file = input_dir.joinpath(scheduler_name)
+ scheduler.load_state_dict(torch.load(input_scheduler_file))
+ logger.info("All scheduler states loaded successfully")
+
+ for i, dataloader in enumerate(dataloaders):
+ sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
+ input_sampler_file = input_dir.joinpath(sampler_name)
+ # Only load if we have our custom sampler
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
+
+ if isinstance(dataloader.dataset, IterableDatasetShard):
+ sampler = dataloader.sampler.sampler
+
+ if isinstance(sampler, SeedableRandomSampler):
+ dataloader.sampler.sampler = torch.load(input_sampler_file)
+ logger.info("All dataloader sampler states loaded successfully")
+
+ # GradScaler state
+ if scaler is not None:
+ input_scaler_file = input_dir.joinpath(SCALER_NAME)
+ scaler.load_state_dict(torch.load(input_scaler_file))
+ logger.info("GradScaler state loaded successfully")
+
+ # Random states
+ try:
+ states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
+ random.setstate(states["random_state"])
+ np.random.set_state(states["numpy_random_seed"])
+ torch.set_rng_state(states["torch_manual_seed"])
+ if is_xpu_available():
+ torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
+ else:
+ torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
+ if is_torch_xla_available():
+ xm.set_rng_state(states["xm_seed"])
+ logger.info("All random states loaded successfully")
+ except Exception:
+ logger.info("Could not load random states")
+
+
+ def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
+ """
+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
+ """
+ # Should this be the right way to get a qual_name type value from `obj`?
+ save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
+ logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
+ save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
+
+
+ def load_custom_state(obj, path, index: int = 0):
+ """
+ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
+ """
+ load_location = f"{path}/custom_checkpoint_{index}.pkl"
+ logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}")
+ obj.load_state_dict(torch.load(load_location, map_location="cpu"))
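These helpers are normally driven through Accelerator.save_state and Accelerator.load_state rather than called directly. A hedged sketch of that round trip; the checkpoint directory name and the tiny model/optimizer are illustrative.

# Hedged sketch: saving and restoring training state via the Accelerator wrappers
# around the functions above. "my_checkpoint" is an illustrative path.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters())
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("my_checkpoint")  # writes model/optimizer/scheduler/RNG states per process
accelerator.load_state("my_checkpoint")  # restores them, including the random states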
venv/lib/python3.10/site-packages/accelerate/data_loader.py ADDED
@@ -0,0 +1,1093 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from contextlib import suppress
17
+ from typing import Callable, List, Optional, Union
18
+
19
+ import torch
20
+ from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
21
+
22
+ from .logging import get_logger
23
+ from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available
24
+ from .utils import (
25
+ RNGType,
26
+ broadcast,
27
+ broadcast_object_list,
28
+ concatenate,
29
+ find_batch_size,
30
+ get_data_structure,
31
+ initialize_tensors,
32
+ is_torch_version,
33
+ send_to_device,
34
+ slice_tensors,
35
+ synchronize_rng_states,
36
+ )
37
+
38
+
39
+ logger = get_logger(__name__)
40
+
41
+ # kwargs of the DataLoader in min version 1.4.0.
42
+ _PYTORCH_DATALOADER_KWARGS = {
43
+ "batch_size": 1,
44
+ "shuffle": False,
45
+ "sampler": None,
46
+ "batch_sampler": None,
47
+ "num_workers": 0,
48
+ "collate_fn": None,
49
+ "pin_memory": False,
50
+ "drop_last": False,
51
+ "timeout": 0,
52
+ "worker_init_fn": None,
53
+ "multiprocessing_context": None,
54
+ "generator": None,
55
+ "prefetch_factor": 2,
56
+ "persistent_workers": False,
57
+ }
58
+
59
+ # kwargs added after by version
60
+ _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}
61
+
62
+ for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
63
+ if is_torch_version(">=", v):
64
+ _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
65
+
66
+
67
+ class SeedableRandomSampler(RandomSampler):
68
+ """
69
+ Same as a random sampler, except that in `__iter__` a seed can be used.
70
+
71
+ Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
72
+ and be fully reproducable on multiple iterations.
73
+
74
+ If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
75
+ (stored in `self.epoch`).
76
+ """
77
+
78
+ def __init__(self, *args, **kwargs):
79
+ super().__init__(*args, **kwargs)
80
+ self.epoch = 0
81
+ self.initial_seed = torch.random.initial_seed()
82
+
83
+ def __iter__(self):
84
+ if self.generator is None:
85
+ self.generator = torch.Generator()
86
+ self.generator.manual_seed(self.initial_seed)
87
+
88
+ # Allow `self.epoch` to modify the seed of the generator
89
+ seed = self.epoch + self.initial_seed
90
+ # print("Setting seed at epoch", self.epoch, seed)
91
+ self.generator.manual_seed(seed)
92
+ yield from super().__iter__()
93
+ self.set_epoch(self.epoch + 1)
94
+
95
+ def set_epoch(self, epoch: int):
96
+ "Sets the current iteration of the sampler."
97
+ self.epoch = epoch
98
+
99
+
100
+ class BatchSamplerShard(BatchSampler):
101
+ """
102
+ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
103
+ always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
104
+ Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
105
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
106
+
107
+ Args:
108
+ batch_sampler (`torch.utils.data.sampler.BatchSampler`):
109
+ The batch sampler to split in several shards.
110
+ num_processes (`int`, *optional*, defaults to 1):
111
+ The number of processes running concurrently.
112
+ process_index (`int`, *optional*, defaults to 0):
113
+ The index of the current process.
114
+ split_batches (`bool`, *optional*, defaults to `False`):
115
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
116
+ yielding different full batches on each process.
117
+
118
+ On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:
119
+
120
+ - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if
121
+ this argument is set to `False`.
122
+ - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`
123
+ then `[6, 7]` if this argument is set to `True`.
124
+ even_batches (`bool`, *optional*, defaults to `True`):
125
+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round
126
+ multiple of (original batch size / number of processes).
127
+
128
+ <Tip warning={true}>
129
+
130
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
131
+ equal to `False`
132
+
133
+ </Tip>"""
134
+
135
+ def __init__(
136
+ self,
137
+ batch_sampler: BatchSampler,
138
+ num_processes: int = 1,
139
+ process_index: int = 0,
140
+ split_batches: bool = False,
141
+ even_batches: bool = True,
142
+ ):
143
+ if split_batches and batch_sampler.batch_size % num_processes != 0:
144
+ raise ValueError(
145
+ f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) "
146
+ f"needs to be a round multiple of the number of processes ({num_processes})."
147
+ )
148
+ self.batch_sampler = batch_sampler
149
+ self.num_processes = num_processes
150
+ self.process_index = process_index
151
+ self.split_batches = split_batches
152
+ self.even_batches = even_batches
153
+ self.batch_size = getattr(batch_sampler, "batch_size", None)
154
+ self.drop_last = getattr(batch_sampler, "drop_last", False)
155
+ if self.batch_size is None and self.even_batches:
156
+ raise ValueError(
157
+ "You need to use `even_batches=False` when the batch sampler has no batch size. If you "
158
+ "are not calling this method directly, set `accelerator.even_batches=False` instead."
159
+ )
160
+
161
+ @property
162
+ def total_length(self):
163
+ return len(self.batch_sampler)
164
+
165
+ def __len__(self):
166
+ if self.split_batches:
167
+ # Split batches does not change the length of the batch sampler
168
+ return len(self.batch_sampler)
169
+ if len(self.batch_sampler) % self.num_processes == 0:
170
+ # If the length is a round multiple of the number of processes, it's easy.
171
+ return len(self.batch_sampler) // self.num_processes
172
+ length = len(self.batch_sampler) // self.num_processes
173
+ if self.drop_last:
174
+ # Same if we drop the remainder.
175
+ return length
176
+ elif self.even_batches:
177
+ # When we even out the batches we always get +1
178
+ return length + 1
179
+ else:
180
+ # Otherwise it depends on the process index.
181
+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
182
+
183
+ def __iter__(self):
184
+ return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
185
+
186
+ def _iter_with_split(self):
187
+ initial_data = []
188
+ batch_length = self.batch_sampler.batch_size // self.num_processes
189
+ for idx, batch in enumerate(self.batch_sampler):
190
+ if idx == 0:
191
+ initial_data = batch
192
+ if len(batch) == self.batch_size:
193
+ # If the batch is full, we yield the part of it this process is responsible for.
194
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
195
+
196
+ # If drop_last is True or the last batch was full, iteration is over, otherwise...
197
+ if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
198
+ if not self.even_batches:
199
+ if len(batch) > batch_length * self.process_index:
200
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
201
+ else:
202
+ # For degenerate cases where the dataset has fewer than num_processes * batch_size samples
203
+ while len(initial_data) < self.batch_size:
204
+ initial_data += initial_data
205
+ batch = batch + initial_data
206
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
207
+
208
+ def _iter_with_no_split(self):
209
+ initial_data = []
210
+ batch_to_yield = []
211
+ for idx, batch in enumerate(self.batch_sampler):
212
+ # We gather the initial indices in case we need to circle back at the end.
213
+ if not self.drop_last and idx < self.num_processes:
214
+ initial_data += batch
215
+ # We identify the batch to yield but wait until we are sure every process gets a full batch before actually
216
+ # yielding it.
217
+ if idx % self.num_processes == self.process_index:
218
+ batch_to_yield = batch
219
+ if idx % self.num_processes == self.num_processes - 1 and (
220
+ self.batch_size is None or len(batch) == self.batch_size
221
+ ):
222
+ yield batch_to_yield
223
+ batch_to_yield = []
224
+
225
+ # If drop_last is True, iteration is over, otherwise...
226
+ if not self.drop_last and len(initial_data) > 0:
227
+ if not self.even_batches:
228
+ if len(batch_to_yield) > 0:
229
+ yield batch_to_yield
230
+ else:
231
+ # ... we yield the complete batch we had saved before if it has the proper length
232
+ if len(batch_to_yield) == self.batch_size:
233
+ yield batch_to_yield
234
+
235
+ # For degenerate cases where the dataset has fewer than num_processes * batch_size samples
236
+ while len(initial_data) < self.num_processes * self.batch_size:
237
+ initial_data += initial_data
238
+
239
+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
240
+ if len(batch) == self.batch_size:
241
+ batch = []
242
+ idx += 1
243
+
244
+ # Make sure we yield a multiple of self.num_processes batches
245
+ cycle_index = 0
246
+ while idx % self.num_processes != 0 or len(batch) > 0:
247
+ end_index = cycle_index + self.batch_size - len(batch)
248
+ batch += initial_data[cycle_index:end_index]
249
+ if idx % self.num_processes == self.process_index:
250
+ yield batch
251
+ cycle_index = end_index
252
+ batch = []
253
+ idx += 1
254
+
255
+
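+ # --- Illustrative sketch (not part of the upstream module): `BatchSamplerShard` splitting two
+ # full batches between two processes, matching the `split_batches=False` behaviour described
+ # in the docstring above. The helper name `_example_batch_sampler_shard` is hypothetical and
+ # reuses the module-level `BatchSampler` import.
+ def _example_batch_sampler_shard():
+     base = BatchSampler(range(8), batch_size=4, drop_last=False)  # [[0, 1, 2, 3], [4, 5, 6, 7]]
+     shard_0 = BatchSamplerShard(base, num_processes=2, process_index=0)
+     shard_1 = BatchSamplerShard(base, num_processes=2, process_index=1)
+     assert list(shard_0) == [[0, 1, 2, 3]]
+     assert list(shard_1) == [[4, 5, 6, 7]]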
256
+ class IterableDatasetShard(IterableDataset):
257
+ """
258
+ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
259
+ always yield a number of samples that is a round multiple of the actual batch size (depending on the value of
260
+ `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
261
+ `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
262
+ be too small or loop with indices from the beginning.
263
+
264
+ Args:
265
+ dataset (`torch.utils.data.dataset.IterableDataset`):
266
+ The iterable dataset to split in several shards.
267
+ batch_size (`int`, *optional*, defaults to 1):
268
+ The size of the batches per shard (if `split_batches=False`) or the size of the batches (if
269
+ `split_batches=True`).
270
+ drop_last (`bool`, *optional*, defaults to `False`):
271
+ Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
272
+ beginning.
273
+ num_processes (`int`, *optional*, defaults to 1):
274
+ The number of processes running concurrently.
275
+ process_index (`int`, *optional*, defaults to 0):
276
+ The index of the current process.
277
+ split_batches (`bool`, *optional*, defaults to `False`):
278
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
279
+ yielding different full batches on each process.
280
+
281
+ On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
282
+
283
+ - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
284
+ argument is set to `False`.
285
+ - the shard on process 0 to yield `[0, 1, 4, 5]` and the shard on process 1 to yield `[2, 3, 6, 7]` if
286
+ this argument is set to `True`.
287
+ """
288
+
289
+ def __init__(
290
+ self,
291
+ dataset: IterableDataset,
292
+ batch_size: int = 1,
293
+ drop_last: bool = False,
294
+ num_processes: int = 1,
295
+ process_index: int = 0,
296
+ split_batches: bool = False,
297
+ ):
298
+ if split_batches and batch_size > 1 and batch_size % num_processes != 0:
299
+ raise ValueError(
300
+ f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) "
301
+ f"needs to be a round multiple of the number of processes ({num_processes})."
302
+ )
303
+ self.dataset = dataset
304
+ self.batch_size = batch_size
305
+ self.drop_last = drop_last
306
+ self.num_processes = num_processes
307
+ self.process_index = process_index
308
+ self.split_batches = split_batches
309
+
310
+ def set_epoch(self, epoch):
311
+ self.epoch = epoch
312
+ if hasattr(self.dataset, "set_epoch"):
313
+ self.dataset.set_epoch(epoch)
314
+
315
+ def __len__(self):
316
+ # We will just raise the downstream error if the underlying dataset is not sized
317
+ if self.drop_last:
318
+ return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
319
+ else:
320
+ return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
321
+
322
+ def __iter__(self):
323
+ if (
324
+ not hasattr(self.dataset, "set_epoch")
325
+ and hasattr(self.dataset, "generator")
326
+ and isinstance(self.dataset.generator, torch.Generator)
327
+ ):
328
+ self.dataset.generator.manual_seed(self.epoch)
329
+ real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
330
+ process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
331
+ process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
332
+
333
+ first_batch = None
334
+ current_batch = []
335
+ for element in self.dataset:
336
+ current_batch.append(element)
337
+ # Wait to have a full batch before yielding elements.
338
+ if len(current_batch) == real_batch_size:
339
+ for i in process_slice:
340
+ yield current_batch[i]
341
+ if first_batch is None:
342
+ first_batch = current_batch.copy()
343
+ current_batch = []
344
+
345
+ # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
346
+ if not self.drop_last and len(current_batch) > 0:
347
+ if first_batch is None:
348
+ first_batch = current_batch.copy()
349
+ while len(current_batch) < real_batch_size:
350
+ current_batch += first_batch
351
+ for i in process_slice:
352
+ yield current_batch[i]
353
+
354
+
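+ # --- Illustrative sketch (not part of the upstream module): `IterableDatasetShard` with
+ # `split_batches=False`, reproducing the docstring example above. The names `_Range` and
+ # `_example_iterable_dataset_shard` are hypothetical; they reuse the module-level
+ # `IterableDataset` import.
+ def _example_iterable_dataset_shard():
+     class _Range(IterableDataset):
+         def __iter__(self):
+             return iter(range(8))
+
+     shard_0 = IterableDatasetShard(_Range(), batch_size=4, num_processes=2, process_index=0)
+     shard_1 = IterableDatasetShard(_Range(), batch_size=4, num_processes=2, process_index=1)
+     assert list(shard_0) == [0, 1, 2, 3]
+     assert list(shard_1) == [4, 5, 6, 7]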
355
+ class DataLoaderStateMixin:
356
+ """
357
+ Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the
358
+ end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other
359
+ useful information that might be needed.
360
+
361
+ **Available attributes:**
362
+
363
+ - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch
364
+ - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
365
+ batch size
366
+
367
+ """
368
+
369
+ def __init_subclass__(cls, **kwargs):
370
+ cls.end_of_dataloader = False
371
+ cls.remainder = -1
372
+
373
+ def reset(self):
374
+ self.end_of_dataloader = False
375
+ self.remainder = -1
376
+
377
+ def begin(self):
378
+ "Prepares the gradient state for the current dataloader"
379
+ self.reset()
380
+ with suppress(Exception):
381
+ if not self._drop_last:
382
+ length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
383
+ self.remainder = length % self.total_batch_size
384
+ self.gradient_state._add_dataloader(self)
385
+
386
+ def end(self):
387
+ "Cleans up the gradient state after exiting the dataloader"
388
+ self.gradient_state._remove_dataloader(self)
389
+
390
+
391
+ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
392
+ """
393
+ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
394
+
395
+ Args:
396
+ dataset (`torch.utils.data.dataset.Dataset`):
397
+ The dataset to use to build this dataloader.
398
+ device (`torch.device`, *optional*):
399
+ If passed, the device to put all batches on.
400
+ rng_types (list of `str` or [`~utils.RNGType`]):
401
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
402
+ several of:
403
+
404
+ - `"torch"`: the base torch random number generator
405
+ - `"cuda"`: the CUDA random number generator (GPU only)
406
+ - `"xla"`: the XLA random number generator (TPU only)
407
+ - `"generator"`: an optional `torch.Generator`
408
+ synchronized_generator (`torch.Generator`, *optional*):
409
+ A random number generator to keep synchronized across processes.
410
+ skip_batches (`int`, *optional*, defaults to 0):
411
+ The number of batches to skip at the beginning.
412
+ **kwargs (additional keyword arguments, *optional*):
413
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
414
+
415
+ **Available attributes:**
416
+
417
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
418
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
419
+ number of processes
420
+
421
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
422
+ """
423
+
424
+ def __init__(
425
+ self,
426
+ dataset,
427
+ device=None,
428
+ rng_types=None,
429
+ synchronized_generator=None,
430
+ skip_batches=0,
431
+ _drop_last: bool = False,
432
+ **kwargs,
433
+ ):
434
+ super().__init__(dataset, **kwargs)
435
+ self.device = device
436
+ self.rng_types = rng_types
437
+ self.synchronized_generator = synchronized_generator
438
+ self.skip_batches = skip_batches
439
+ self.gradient_state = GradientState()
440
+ self._drop_last = _drop_last
441
+ self.iteration = 0
442
+
443
+ def __iter__(self):
444
+ if self.rng_types is not None:
445
+ synchronize_rng_states(self.rng_types, self.synchronized_generator)
446
+ self.begin()
447
+
448
+ self.set_epoch(self.iteration)
449
+ dataloader_iter = super().__iter__()
450
+ # We iterate one batch ahead to check when we are at the end
451
+ try:
452
+ current_batch = next(dataloader_iter)
453
+ except StopIteration:
454
+ yield
455
+
456
+ batch_index = 0
457
+ while True:
458
+ try:
459
+ # But we still move it to the device so it is done before `StopIteration` is reached
460
+ if self.device is not None:
461
+ current_batch = send_to_device(current_batch, self.device)
462
+ next_batch = next(dataloader_iter)
463
+ if batch_index >= self.skip_batches:
464
+ yield current_batch
465
+ batch_index += 1
466
+ current_batch = next_batch
467
+ except StopIteration:
468
+ self.end_of_dataloader = True
469
+ if batch_index >= self.skip_batches:
470
+ yield current_batch
471
+ break
472
+
473
+ self.iteration += 1
474
+ self.end()
475
+
476
+ def set_epoch(self, epoch: int):
477
+ # In case it is manually passed in, the user can set it to what they like
478
+ if self.iteration != epoch:
479
+ self.iteration = epoch
480
+ if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
481
+ self.batch_sampler.sampler.set_epoch(epoch)
482
+ # We support if a custom `Dataset` implementation has `set_epoch`
483
+ # or in general HF datasets `Datasets`
484
+ elif hasattr(self.dataset, "set_epoch"):
485
+ self.dataset.set_epoch(epoch)
486
+
487
+ @property
488
+ def total_batch_size(self):
489
+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
490
+ return (
491
+ batch_sampler.batch_size
492
+ if getattr(batch_sampler, "split_batches", False)
493
+ else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
494
+ )
495
+
496
+ @property
497
+ def total_dataset_length(self):
498
+ if hasattr(self.dataset, "total_length"):
499
+ return self.dataset.total_length
500
+ else:
501
+ return len(self.dataset)
502
+
503
+
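+ # --- Illustrative sketch (not part of the upstream module): building a `DataLoaderShard`
+ # directly on a single process. In practice this class is created for you by
+ # `prepare_data_loader` / `Accelerator.prepare`; the helper name `_example_dataloader_shard`
+ # is hypothetical and reuses the module-level `torch` import.
+ def _example_dataloader_shard():
+     loader = DataLoaderShard(torch.arange(6), device=None, batch_size=2)  # device=None keeps batches on CPU
+     assert [batch.tolist() for batch in loader] == [[0, 1], [2, 3], [4, 5]]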
504
+ if is_torch_xla_available():
505
+ import torch_xla.distributed.parallel_loader as xpl
506
+
507
+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
508
+ """
509
+ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.
510
+
511
+ XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to
512
+ prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main
513
+ thread only.
514
+
515
+ **Available attributes:**
516
+
517
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
518
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
519
+ number of processes
520
+
521
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
522
+ """
523
+
524
+ def __init__(self, dataloader: DataLoaderShard, device: torch.device):
525
+ super().__init__(dataloader, device)
526
+ self._rng_types = self._loader.rng_types
527
+ self._loader.rng_types = None
528
+
529
+ def __iter__(self):
530
+ if self._rng_types is not None:
531
+ synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
532
+
533
+ return super().__iter__()
534
+
535
+ @property
536
+ def total_batch_size(self):
537
+ return self._loader.total_batch_size
538
+
539
+ @property
540
+ def total_dataset_length(self):
541
+ return self._loader.total_dataset_length
542
+
543
+ @property
544
+ def batch_sampler(self):
545
+ return self._loader.batch_sampler
546
+
547
+
548
+ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
549
+ """
550
+ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
551
+ process their part of the batch.
552
+
553
+ Args:
554
+ split_batches (`bool`, *optional*, defaults to `False`):
555
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
556
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
557
+ `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be
558
+ the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial
559
+ `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch
560
+ size of the `dataloader` is a round multiple of the number of processes.
561
+ skip_batches (`int`, *optional*, defaults to 0):
562
+ The number of batches to skip at the beginning of an iteration.
563
+
564
+ **Available attributes:**
565
+
566
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
567
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
568
+ number of processes
569
+
570
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
571
+ """
572
+
573
+ def __init__(
574
+ self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs
575
+ ):
576
+ shuffle = False
577
+ if is_torch_version(">=", "1.11.0"):
578
+ from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
579
+
580
+ # We need to save the shuffling state of the DataPipe
581
+ if isinstance(dataset, ShufflerIterDataPipe):
582
+ shuffle = dataset._shuffle_enabled
583
+ super().__init__(dataset, **kwargs)
584
+ self.split_batches = split_batches
585
+ if shuffle:
586
+ torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
587
+
588
+ self.gradient_state = GradientState()
589
+ self.state = AcceleratorState()
590
+ self._drop_last = _drop_last
591
+ self.skip_batches = skip_batches
592
+
593
+ self.slice_fn = slice_tensors if slice_fn is None else slice_fn
594
+ self.iteration = 0
595
+
596
+ def _fetch_batches(self, iterator):
597
+ batches, batch = None, None
598
+ # On process 0, we gather the batch to dispatch.
599
+ if self.state.process_index == 0:
600
+ try:
601
+ if self.split_batches:
602
+ # One batch of the main iterator is dispatched and split.
603
+ batch = next(iterator)
604
+ else:
605
+ # num_processes batches of the main iterator are concatenated then dispatched and split.
606
+ # We add the batches one by one so we have the remainder available when drop_last=False.
607
+ batches = []
608
+ for _ in range(self.state.num_processes):
609
+ batches.append(next(iterator))
610
+ try:
611
+ batch = concatenate(batches, dim=0)
612
+ except RuntimeError as e:
613
+ raise RuntimeError(
614
+ "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`."
615
+ "either pass `dispatch_batches=False` and have each process fetch its own batch "
616
+ " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and "
617
+ "slice it into `num_processes` batches for each process."
618
+ ) from e
619
+ # In both cases, we need to get the structure of the batch that we will broadcast on other
620
+ # processes to initialize the tensors with the right shape.
621
+ # data_structure, stop_iteration
622
+ batch_info = [get_data_structure(batch), False]
623
+ except StopIteration:
624
+ batch_info = [None, True]
625
+ else:
626
+ batch_info = [None, self._stop_iteration]
627
+ # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.
628
+ broadcast_object_list(batch_info)
629
+ self._stop_iteration = batch_info[1]
630
+ if self._stop_iteration:
631
+ # If drop_last is False and split_batches is False, we may have a remainder to take care of.
632
+ if not self.split_batches and not self._drop_last:
633
+ if self.state.process_index == 0 and len(batches) > 0:
634
+ batch = concatenate(batches, dim=0)
635
+ batch_info = [get_data_structure(batch), False]
636
+ else:
637
+ batch_info = [None, True]
638
+ broadcast_object_list(batch_info)
639
+ return batch, batch_info
640
+
641
+ def __iter__(self):
642
+ self.begin()
643
+ self.set_epoch(self.iteration)
644
+ main_iterator = None
645
+ if is_torch_version(">=", "2.0.1"):
646
+ # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts
647
+ # shared seed to all dist processes. Thus, we need to create iterator for all dist processes.
648
+ # But, we only iterate through the DataLoader on process 0.
649
+ main_iterator = super().__iter__()
650
+ elif self.state.process_index == 0:
651
+ main_iterator = super().__iter__()
652
+ stop_iteration = False
653
+ self._stop_iteration = False
654
+ first_batch = None
655
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
656
+ batch_index = 0
657
+ while not stop_iteration:
658
+ batch, batch_info = next_batch, next_batch_info
659
+
660
+ if self.state.process_index != 0:
661
+ # Initialize tensors on other processes than process 0.
662
+ batch = initialize_tensors(batch_info[0])
663
+ batch = send_to_device(batch, self.state.device)
664
+ # Broadcast the batch before splitting it.
665
+ batch = broadcast(batch, from_process=0)
666
+
667
+ if not self._drop_last and first_batch is None:
668
+ # We keep at least num processes elements of the first batch to be able to complete the last batch
669
+ first_batch = self.slice_fn(
670
+ batch,
671
+ slice(0, self.state.num_processes),
672
+ process_index=self.state.process_index,
673
+ num_processes=self.state.num_processes,
674
+ )
675
+
676
+ if batch is None:
677
+ raise ValueError(
678
+ f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration."
679
+ )
680
+
681
+ observed_batch_size = find_batch_size(batch)
682
+ batch_size = observed_batch_size // self.state.num_processes
683
+
684
+ stop_iteration = self._stop_iteration
685
+ if not stop_iteration:
686
+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in
687
+ # the dataloader since the number of batches is a round multiple of the number of processes.
688
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
689
+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.
690
+ if self._stop_iteration and next_batch_info[0] is None:
691
+ stop_iteration = True
692
+
693
+ if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:
694
+ # If the last batch is not complete, let's add the first batch to it.
695
+ batch = concatenate([batch, first_batch], dim=0)
696
+ # Batch size computation above is wrong, it's off by 1 so we fix it.
697
+ batch_size += 1
698
+
699
+ data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)
700
+ batch = self.slice_fn(
701
+ batch,
702
+ data_slice,
703
+ process_index=self.state.process_index,
704
+ num_processes=self.state.num_processes,
705
+ )
706
+
707
+ if stop_iteration:
708
+ self.end_of_dataloader = True
709
+ self.remainder = observed_batch_size
710
+ if batch_index >= self.skip_batches:
711
+ yield batch
712
+ batch_index += 1
713
+ self.iteration += 1
714
+ self.end()
715
+
716
+ def set_epoch(self, epoch: int):
717
+ # In case it is manually passed in, the user can set it to what they like
718
+ if self.iteration != epoch:
719
+ self.iteration = epoch
720
+ if hasattr(self.batch_sampler.sampler, "set_epoch"):
721
+ self.batch_sampler.sampler.set_epoch(epoch)
722
+ elif hasattr(self.dataset, "set_epoch"):
723
+ self.dataset.set_epoch(epoch)
724
+
725
+ def __len__(self):
726
+ whole_length = super().__len__()
727
+ if self.split_batches:
728
+ return whole_length
729
+ elif self._drop_last:
730
+ return whole_length // self.state.num_processes
731
+ else:
732
+ return math.ceil(whole_length / self.state.num_processes)
733
+
734
+ @property
735
+ def total_batch_size(self):
736
+ return (
737
+ self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)
738
+ )
739
+
740
+ @property
741
+ def total_dataset_length(self):
742
+ return len(self.dataset)
743
+
744
+
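+ # --- Illustrative sketch (not part of the upstream module): the slicing arithmetic that
+ # `DataLoaderDispatcher` applies once process 0 has concatenated one batch per process. The
+ # helper name `_example_dispatch_slicing` is hypothetical and reuses the module-level `torch`
+ # import.
+ def _example_dispatch_slicing():
+     full_batch = torch.arange(6)  # two per-process batches of size 3, already concatenated
+     num_processes = 2
+     batch_size = full_batch.shape[0] // num_processes
+     shards = [full_batch[rank * batch_size : (rank + 1) * batch_size] for rank in range(num_processes)]
+     assert [shard.tolist() for shard in shards] == [[0, 1, 2], [3, 4, 5]]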
745
+ def prepare_data_loader(
746
+ dataloader: DataLoader,
747
+ device: Optional[torch.device] = None,
748
+ num_processes: Optional[int] = None,
749
+ process_index: Optional[int] = None,
750
+ split_batches: bool = False,
751
+ put_on_device: bool = False,
752
+ rng_types: Optional[List[Union[str, RNGType]]] = None,
753
+ dispatch_batches: Optional[bool] = None,
754
+ even_batches: bool = True,
755
+ slice_fn_for_dispatch: Optional[Callable] = None,
756
+ use_seedable_sampler: bool = False,
757
+ ) -> DataLoader:
758
+ """
759
+ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
760
+
761
+ Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
762
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
763
+
764
+ Args:
765
+ dataloader (`torch.utils.data.dataloader.DataLoader`):
766
+ The data loader to split across several devices.
767
+ device (`torch.device`):
768
+ The target device for the returned `DataLoader`.
769
+ num_processes (`int`, *optional*):
770
+ The number of processes running concurrently. Will default to the value given by
771
+ [`~state.AcceleratorState`].
772
+ process_index (`int`, *optional*):
773
+ The index of the current process. Will default to the value given by [`~state.AcceleratorState`].
774
+ split_batches (`bool`, *optional*, defaults to `False`):
775
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
776
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
777
+ `num_processes` batches at each iteration).
778
+
779
+ Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
780
+ this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
781
+ otherwise.
782
+
783
+ Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
784
+ the number of processes.
785
+ put_on_device (`bool`, *optional*, defaults to `False`):
786
+ Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or
787
+ dictionaries of tensors).
788
+ rng_types (list of `str` or [`~utils.RNGType`]):
789
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
790
+ several of:
791
+
792
+ - `"torch"`: the base torch random number generator
793
+ - `"cuda"`: the CUDA random number generator (GPU only)
794
+ - `"xla"`: the XLA random number generator (TPU only)
795
+ - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
796
+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
797
+
798
+ dispatch_batches (`bool`, *optional*):
799
+ If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches
800
+ are split and broadcast to each process. Will default to `True` when the underlying dataset is an
801
+ `IterableDataset`, `False` otherwise.
802
+ even_batches (`bool`, *optional*, defaults to `True`):
803
+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the
804
+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
805
+ all workers.
806
+ slice_fn_for_dispatch (`Callable`, *optional*`):
807
+ If passed, this function will be used to slice tensors across `num_processes`. Will default to
808
+ [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be
809
+ ignored otherwise.
810
+ use_seedable_sampler (`bool`, *optional*, defaults to `False`):
811
+ Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better
812
+ reproducibility. Comes at a cost of potentially different performance due to different shuffling
813
+ algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
814
+ `self.set_epoch`
815
+
816
+ Returns:
817
+ `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches for the current process.
818
+
819
+ <Tip warning={true}>
820
+
821
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
822
+ equal to `False`
823
+
824
+ </Tip>
825
+ """
826
+ if dispatch_batches is None:
827
+ if not put_on_device:
828
+ dispatch_batches = False
829
+ else:
830
+ dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
831
+
832
+ if dispatch_batches and not put_on_device:
833
+ raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
834
+ # Grab defaults from AcceleratorState
835
+ state = AcceleratorState()
836
+ if num_processes is None:
837
+ num_processes = state.num_processes
838
+ if process_index is None:
839
+ process_index = state.process_index
840
+
841
+ # Sanity check
842
+ if split_batches:
843
+ if dataloader.batch_size is not None:
844
+ batch_size_for_check = dataloader.batch_size
845
+ else:
846
+ # For custom batch_sampler
847
+ if hasattr(dataloader.batch_sampler, "batch_size"):
848
+ batch_size_for_check = dataloader.batch_sampler.batch_size
849
+ else:
850
+ raise ValueError(
851
+ "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed "
852
+ "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. "
853
+ "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` "
854
+ f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set."
855
+ )
856
+
857
+ if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:
858
+ raise ValueError(
859
+ f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
860
+ f"needs to be a round multiple of the number of processes ({num_processes})."
861
+ )
862
+
863
+ new_dataset = dataloader.dataset
864
+ # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
865
+ new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
866
+ sampler_is_batch_sampler = False
867
+ synchronized_generator = None
868
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
869
+ if sampler_is_batch_sampler:
870
+ sampler = getattr(dataloader.sampler, "sampler", None)
871
+ else:
872
+ sampler = getattr(dataloader.batch_sampler, "sampler", None)
873
+ if isinstance(sampler, RandomSampler) and use_seedable_sampler:
874
+ # When iterating through the dataloader during distributed processes
875
+ # we want to ensure that on each process we are iterating through the same
876
+ # samples in the same order if a seed is set. This requires a tweak
877
+ # to the `torch.utils.data.RandomSampler` class (if used).
878
+ sampler = SeedableRandomSampler(
879
+ data_source=sampler.data_source,
880
+ replacement=sampler.replacement,
881
+ num_samples=sampler._num_samples,
882
+ generator=getattr(sampler, "generator", torch.Generator()),
883
+ )
884
+
885
+ if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
886
+ # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
887
+ generator = torch.Generator().manual_seed(42)
888
+ dataloader.generator = generator
889
+ dataloader.sampler.generator = generator
890
+ # No change if no multiprocess
891
+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
892
+ if isinstance(new_dataset, IterableDataset):
893
+ if getattr(dataloader.dataset, "generator", None) is not None:
894
+ synchronized_generator = dataloader.dataset.generator
895
+ new_dataset = IterableDatasetShard(
896
+ new_dataset,
897
+ batch_size=dataloader.batch_size,
898
+ drop_last=dataloader.drop_last,
899
+ num_processes=num_processes,
900
+ process_index=process_index,
901
+ split_batches=split_batches,
902
+ )
903
+ else:
904
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
905
+ new_batch_sampler = BatchSamplerShard(
906
+ batch_sampler,
907
+ num_processes=num_processes,
908
+ process_index=process_index,
909
+ split_batches=split_batches,
910
+ even_batches=even_batches,
911
+ )
912
+
913
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
914
+ ignore_kwargs = [
915
+ "batch_size",
916
+ "shuffle",
917
+ "sampler",
918
+ "batch_sampler",
919
+ "drop_last",
920
+ ]
921
+
922
+ if rng_types is not None and synchronized_generator is None and "generator" in rng_types:
923
+ rng_types.remove("generator")
924
+
925
+ kwargs = {
926
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
927
+ for k in _PYTORCH_DATALOADER_KWARGS
928
+ if k not in ignore_kwargs
929
+ }
930
+
931
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
932
+ if new_batch_sampler is None:
933
+ kwargs["drop_last"] = dataloader.drop_last
934
+ kwargs["batch_size"] = (
935
+ dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
936
+ )
937
+ if dispatch_batches:
938
+ kwargs.pop("generator")
939
+ dataloader = DataLoaderDispatcher(
940
+ new_dataset,
941
+ split_batches=split_batches,
942
+ batch_sampler=new_batch_sampler,
943
+ _drop_last=dataloader.drop_last,
944
+ slice_fn=slice_fn_for_dispatch,
945
+ **kwargs,
946
+ )
947
+ elif sampler_is_batch_sampler:
948
+ dataloader = DataLoaderShard(
949
+ new_dataset,
950
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
951
+ sampler=new_batch_sampler,
952
+ batch_size=dataloader.batch_size,
953
+ rng_types=rng_types,
954
+ _drop_last=dataloader.drop_last,
955
+ synchronized_generator=synchronized_generator,
956
+ **kwargs,
957
+ )
958
+ else:
959
+ dataloader = DataLoaderShard(
960
+ new_dataset,
961
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
962
+ batch_sampler=new_batch_sampler,
963
+ rng_types=rng_types,
964
+ synchronized_generator=synchronized_generator,
965
+ _drop_last=dataloader.drop_last,
966
+ **kwargs,
967
+ )
968
+
969
+ if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
970
+ if sampler_is_batch_sampler:
971
+ dataloader.sampler.sampler = sampler
972
+ else:
973
+ dataloader.batch_sampler.sampler = sampler
974
+ if hasattr(dataloader.batch_sampler, "batch_sampler"):
975
+ dataloader.batch_sampler.batch_sampler.sampler = sampler
976
+ if state.distributed_type == DistributedType.XLA:
977
+ return MpDeviceLoaderWrapper(dataloader, device)
978
+ return dataloader
979
+
980
+
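+ # --- Illustrative sketch (not part of the upstream module): typical use of
+ # `prepare_data_loader` on an already-initialized process (normally this is done for you by
+ # `Accelerator.prepare`). The helper name `_example_prepare_data_loader` is hypothetical and
+ # assumes the Accelerate state has been set up, e.g. via `accelerate launch` or by creating an
+ # `Accelerator` first.
+ def _example_prepare_data_loader():
+     base_loader = DataLoader(list(range(16)), batch_size=4, shuffle=True)
+     prepared = prepare_data_loader(base_loader, put_on_device=True, use_seedable_sampler=True)
+     for batch in prepared:  # each process only sees its own shard of the data
+         pass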
981
+ class SkipBatchSampler(BatchSampler):
982
+ """
983
+ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
984
+ """
985
+
986
+ def __init__(self, batch_sampler, skip_batches=0):
987
+ self.batch_sampler = batch_sampler
988
+ self.skip_batches = skip_batches
989
+
990
+ def __iter__(self):
991
+ for index, samples in enumerate(self.batch_sampler):
992
+ if index >= self.skip_batches:
993
+ yield samples
994
+
995
+ @property
996
+ def total_length(self):
997
+ return len(self.batch_sampler)
998
+
999
+ def __len__(self):
1000
+ return len(self.batch_sampler) - self.skip_batches
1001
+
1002
+
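+ # --- Illustrative sketch (not part of the upstream module): `SkipBatchSampler` dropping the
+ # first batch of a regular `BatchSampler`. The helper name `_example_skip_batch_sampler` is
+ # hypothetical and reuses the module-level `BatchSampler` import.
+ def _example_skip_batch_sampler():
+     base = BatchSampler(range(6), batch_size=2, drop_last=False)  # [[0, 1], [2, 3], [4, 5]]
+     skipped = SkipBatchSampler(base, skip_batches=1)
+     assert list(skipped) == [[2, 3], [4, 5]]
+     assert len(skipped) == 2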
1003
+ class SkipDataLoader(DataLoader):
1004
+ """
1005
+ Subclass of a PyTorch `DataLoader` that will skip the first batches.
1006
+
1007
+ Args:
1008
+ dataset (`torch.utils.data.dataset.Dataset`):
1009
+ The dataset to use to build this dataloader.
1010
+ skip_batches (`int`, *optional*, defaults to 0):
1011
+ The number of batches to skip at the beginning.
1012
+ kwargs:
1013
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
1014
+ """
1015
+
1016
+ def __init__(self, dataset, skip_batches=0, **kwargs):
1017
+ super().__init__(dataset, **kwargs)
1018
+ self.skip_batches = skip_batches
1019
+
1020
+ def __iter__(self):
1021
+ for index, batch in enumerate(super().__iter__()):
1022
+ if index >= self.skip_batches:
1023
+ yield batch
1024
+
1025
+
1026
+ def skip_first_batches(dataloader, num_batches=0):
1027
+ """
1028
+ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
1029
+ """
1030
+ dataset = dataloader.dataset
1031
+ sampler_is_batch_sampler = False
1032
+ if isinstance(dataset, IterableDataset):
1033
+ new_batch_sampler = None
1034
+ else:
1035
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
1036
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
1037
+ new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
1038
+
1039
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
1040
+ ignore_kwargs = [
1041
+ "batch_size",
1042
+ "shuffle",
1043
+ "sampler",
1044
+ "batch_sampler",
1045
+ "drop_last",
1046
+ ]
1047
+
1048
+ kwargs = {
1049
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
1050
+ for k in _PYTORCH_DATALOADER_KWARGS
1051
+ if k not in ignore_kwargs
1052
+ }
1053
+
1054
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
1055
+ if new_batch_sampler is None:
1056
+ kwargs["drop_last"] = dataloader.drop_last
1057
+ kwargs["batch_size"] = dataloader.batch_size
1058
+
1059
+ if isinstance(dataloader, DataLoaderDispatcher):
1060
+ if new_batch_sampler is None:
1061
+ # Need to manually skip batches in the dataloader
1062
+ kwargs["skip_batches"] = num_batches
1063
+ dataloader = DataLoaderDispatcher(
1064
+ dataset,
1065
+ split_batches=dataloader.split_batches,
1066
+ batch_sampler=new_batch_sampler,
1067
+ _drop_last=dataloader._drop_last,
1068
+ **kwargs,
1069
+ )
1070
+ elif isinstance(dataloader, DataLoaderShard):
1071
+ if new_batch_sampler is None:
1072
+ # Need to manually skip batches in the dataloader
1073
+ kwargs["skip_batches"] = num_batches
1074
+ elif sampler_is_batch_sampler:
1075
+ kwargs["sampler"] = new_batch_sampler
1076
+ kwargs["batch_size"] = dataloader.batch_size
1077
+ else:
1078
+ kwargs["batch_sampler"] = new_batch_sampler
1079
+ dataloader = DataLoaderShard(
1080
+ dataset,
1081
+ device=dataloader.device,
1082
+ rng_types=dataloader.rng_types,
1083
+ synchronized_generator=dataloader.synchronized_generator,
1084
+ **kwargs,
1085
+ )
1086
+ else:
1087
+ if new_batch_sampler is None:
1088
+ # Need to manually skip batches in the dataloader
1089
+ dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
1090
+ else:
1091
+ dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
1092
+
1093
+ return dataloader
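+ # --- Illustrative sketch (not part of the upstream module): resuming training mid-epoch with
+ # `skip_first_batches`. The helper name `_example_skip_first_batches` is hypothetical and
+ # reuses the module-level `DataLoader` import.
+ def _example_skip_first_batches():
+     loader = DataLoader(list(range(10)), batch_size=2)
+     resumed = skip_first_batches(loader, num_batches=2)  # skip the 2 batches already consumed
+     assert [batch.tolist() for batch in resumed] == [[4, 5], [6, 7], [8, 9]]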
venv/lib/python3.10/site-packages/accelerate/hooks.py ADDED
@@ -0,0 +1,709 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ from typing import Dict, List, Mapping, Optional, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from .state import PartialState
22
+ from .utils import (
23
+ PrefixedDataset,
24
+ find_device,
25
+ named_module_tensors,
26
+ send_to_device,
27
+ set_module_tensor_to_device,
28
+ )
29
+ from .utils.modeling import get_non_persistent_buffers
30
+ from .utils.other import recursive_getattr
31
+
32
+
33
+ class ModelHook:
34
+ """
35
+ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
36
+ with existing PyTorch hooks is that they get passed along the kwargs.
37
+
38
+ Class attribute:
39
+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
40
+ the `torch.no_grad()` context manager.
41
+ """
42
+
43
+ no_grad = False
44
+
45
+ def init_hook(self, module):
46
+ """
47
+ To be executed when the hook is attached to the module.
48
+
49
+ Args:
50
+ module (`torch.nn.Module`): The module attached to this hook.
51
+ """
52
+ return module
53
+
54
+ def pre_forward(self, module, *args, **kwargs):
55
+ """
56
+ To be executed just before the forward method of the model.
57
+
58
+ Args:
59
+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
60
+ args (`Tuple[Any]`): The positional arguments passed to the module.
61
+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
62
+
63
+ Returns:
64
+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
65
+ """
66
+ return args, kwargs
67
+
68
+ def post_forward(self, module, output):
69
+ """
70
+ To be executed just after the forward method of the model.
71
+
72
+ Args:
73
+ module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.
74
+ output (`Any`): The output of the module.
75
+
76
+ Returns:
77
+ `Any`: The processed `output`.
78
+ """
79
+ return output
80
+
81
+ def detach_hook(self, module):
82
+ """
83
+ To be executed when the hook is detached from a module.
84
+
85
+ Args:
86
+ module (`torch.nn.Module`): The module detached from this hook.
87
+ """
88
+ return module
89
+
90
+
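+ # --- Illustrative sketch (not part of the upstream module): a minimal custom `ModelHook`
+ # that casts floating-point positional inputs before the forward pass. The class name
+ # `_CastInputsHook` is hypothetical and only relies on the module-level `torch` import.
+ class _CastInputsHook(ModelHook):
+     def __init__(self, dtype=torch.float16):
+         self.dtype = dtype
+
+     def pre_forward(self, module, *args, **kwargs):
+         # Only tensors with a floating dtype are cast; everything else is passed through.
+         args = tuple(a.to(self.dtype) if torch.is_tensor(a) and a.is_floating_point() else a for a in args)
+         return args, kwargs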
91
+ class SequentialHook(ModelHook):
92
+ """
93
+ A hook that can contain several hooks and iterates through them at each event.
94
+ """
95
+
96
+ def __init__(self, *hooks):
97
+ self.hooks = hooks
98
+
99
+ def init_hook(self, module):
100
+ for hook in self.hooks:
101
+ module = hook.init_hook(module)
102
+ return module
103
+
104
+ def pre_forward(self, module, *args, **kwargs):
105
+ for hook in self.hooks:
106
+ args, kwargs = hook.pre_forward(module, *args, **kwargs)
107
+ return args, kwargs
108
+
109
+ def post_forward(self, module, output):
110
+ for hook in self.hooks:
111
+ output = hook.post_forward(module, output)
112
+ return output
113
+
114
+ def detach_hook(self, module):
115
+ for hook in self.hooks:
116
+ module = hook.detach_hook(module)
117
+ return module
118
+
119
+
120
+ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
121
+ """
122
+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove
123
+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.
124
+
125
+ <Tip warning={true}>
126
+
127
+ If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
128
+ together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
129
+
130
+ </Tip>
131
+
132
+ Args:
133
+ module (`torch.nn.Module`):
134
+ The module to attach a hook to.
135
+ hook (`ModelHook`):
136
+ The hook to attach.
137
+ append (`bool`, *optional*, defaults to `False`):
138
+ Whether the hook should be chained with an existing one (if module already contains a hook) or not.
139
+
140
+ Returns:
141
+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
142
+ be discarded).
143
+ """
144
+
145
+ if append and (getattr(module, "_hf_hook", None) is not None):
146
+ old_hook = module._hf_hook
147
+ remove_hook_from_module(module)
148
+ hook = SequentialHook(old_hook, hook)
149
+
150
+ if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
151
+ # If we already put some hook on this module, we replace it with the new one.
152
+ old_forward = module._old_forward
153
+ else:
154
+ old_forward = module.forward
155
+ module._old_forward = old_forward
156
+
157
+ module = hook.init_hook(module)
158
+ module._hf_hook = hook
159
+
160
+ def new_forward(module, *args, **kwargs):
161
+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
162
+ if module._hf_hook.no_grad:
163
+ with torch.no_grad():
164
+ output = module._old_forward(*args, **kwargs)
165
+ else:
166
+ output = module._old_forward(*args, **kwargs)
167
+ return module._hf_hook.post_forward(module, output)
168
+
169
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
170
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
171
+ if "GraphModuleImpl" in str(type(module)):
172
+ module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
173
+ else:
174
+ module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
175
+
176
+ return module
177
+
178
+
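+ # --- Illustrative sketch (not part of the upstream module): attaching and removing a hook on
+ # a plain `nn.Linear`. The helper name `_example_add_and_remove_hook` is hypothetical and
+ # reuses the module-level `torch` / `nn` imports; `remove_hook_from_module` is defined below.
+ def _example_add_and_remove_hook():
+     layer = nn.Linear(4, 4)
+     add_hook_to_module(layer, ModelHook())  # wraps `layer.forward`
+     layer(torch.randn(2, 4))                # pre_forward/post_forward run around this call
+     remove_hook_from_module(layer)          # restores the original `forward`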
179
+ def remove_hook_from_module(module: nn.Module, recurse=False):
180
+ """
181
+ Removes any hook attached to a module via `add_hook_to_module`.
182
+
183
+ Args:
184
+ module (`torch.nn.Module`): The module to remove the hook from.
185
+ recurse (`bool`, *optional*): Whether to remove the hooks recursively.
186
+
187
+ Returns:
188
+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
189
+ be discarded).
190
+ """
191
+
192
+ if hasattr(module, "_hf_hook"):
193
+ module._hf_hook.detach_hook(module)
194
+ delattr(module, "_hf_hook")
195
+
196
+ if hasattr(module, "_old_forward"):
197
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
198
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
199
+ if "GraphModuleImpl" in str(type(module)):
200
+ module.__class__.forward = module._old_forward
201
+ else:
202
+ module.forward = module._old_forward
203
+ delattr(module, "_old_forward")
204
+
205
+ if recurse:
206
+ for child in module.children():
207
+ remove_hook_from_module(child, recurse)
208
+
209
+ return module
210
+
211
+
212
+ class AlignDevicesHook(ModelHook):
213
+ """
214
+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
215
+ associated module, potentially offloading the weights after the forward pass.
216
+
217
+ Args:
218
+ execution_device (`torch.device`, *optional*):
219
+ The device on which inputs and model weights should be placed before the forward pass.
220
+ offload (`bool`, *optional*, defaults to `False`):
221
+ Whether or not the weights should be offloaded after the forward pass.
222
+ io_same_device (`bool`, *optional*, defaults to `False`):
223
+ Whether or not the output should be placed on the same device as the input was.
224
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
225
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
226
+ offload_buffers (`bool`, *optional*, defaults to `False`):
227
+ Whether or not to include the associated module's buffers when offloading.
228
+ place_submodules (`bool`, *optional*, defaults to `False`):
229
+ Whether to place the submodules on `execution_device` during the `init_hook` event.
230
+ """
231
+
232
+ def __init__(
233
+ self,
234
+ execution_device: Optional[Union[int, str, torch.device]] = None,
235
+ offload: bool = False,
236
+ io_same_device: bool = False,
237
+ weights_map: Optional[Mapping] = None,
238
+ offload_buffers: bool = False,
239
+ place_submodules: bool = False,
240
+ skip_keys: Optional[Union[str, List[str]]] = None,
241
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
242
+ ):
243
+ self.execution_device = execution_device
244
+ self.offload = offload
245
+ self.io_same_device = io_same_device
246
+ self.weights_map = weights_map
247
+ self.offload_buffers = offload_buffers
248
+ self.place_submodules = place_submodules
249
+ self.skip_keys = skip_keys
250
+
251
+ # Will contain the input device when `io_same_device=True`.
252
+ self.input_device = None
253
+ self.param_original_devices = {}
254
+ self.buffer_original_devices = {}
255
+ self.tied_params_names = set()
256
+
257
+ # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory
258
+ # for tied weights already loaded on the target execution device.
259
+ self.tied_params_map = tied_params_map
260
+
261
+ def __repr__(self):
262
+ return (
263
+ f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
264
+ f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
265
+ f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
266
+ )
267
+
268
+ def init_hook(self, module):
269
+ # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero.
270
+ if self.execution_device == "meta" or self.execution_device == torch.device("meta"):
271
+ self.tied_params_map = None
272
+
273
+ if not self.offload and self.execution_device is not None:
274
+ for name, _ in named_module_tensors(module, recurse=self.place_submodules):
275
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
276
+ elif self.offload:
277
+ self.original_devices = {
278
+ name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)
279
+ }
280
+ if self.weights_map is None:
281
+ self.weights_map = {
282
+ name: param.to("cpu")
283
+ for name, param in named_module_tensors(
284
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules
285
+ )
286
+ }
287
+ for name, _ in named_module_tensors(
288
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True
289
+ ):
290
+ # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer,
291
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
292
+ # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str]
293
+ # to add on the fly pointers to `tied_params_map` in the pre_forward call.
294
+ if (
295
+ self.tied_params_map is not None
296
+ and recursive_getattr(module, name).data_ptr() in self.tied_params_map
297
+ ):
298
+ self.tied_params_names.add(name)
299
+
300
+ set_module_tensor_to_device(module, name, "meta")
301
+
302
+ if not self.offload_buffers and self.execution_device is not None:
303
+ for name, _ in module.named_buffers(recurse=self.place_submodules):
304
+ set_module_tensor_to_device(
305
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
306
+ )
307
+ elif self.offload_buffers and self.execution_device is not None:
308
+ for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
309
+ set_module_tensor_to_device(
310
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
311
+ )
312
+
313
+ return module
314
+
315
+ def pre_forward(self, module, *args, **kwargs):
316
+ if self.io_same_device:
317
+ self.input_device = find_device([args, kwargs])
318
+ if self.offload:
319
+ self.tied_pointers_to_remove = set()
320
+
321
+ for name, _ in named_module_tensors(
322
+ module,
323
+ include_buffers=self.offload_buffers,
324
+ recurse=self.place_submodules,
325
+ remove_non_persistent=True,
326
+ ):
327
+ fp16_statistics = None
328
+ value = self.weights_map[name]
329
+ if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
330
+ if value.dtype == torch.int8:
331
+ fp16_statistics = self.weights_map[name.replace("weight", "SCB")]
332
+
333
+ # In case we are using offloading with tied weights, we need to keep track of the offloaded weights
334
+ # that are loaded on device at this point, as we will need to remove them as well from the dictionary
335
+ # self.tied_params_map in order to allow to free memory.
336
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map:
337
+ self.tied_params_map[value.data_ptr()] = {}
338
+
339
+ if (
340
+ value is not None
341
+ and self.tied_params_map is not None
342
+ and value.data_ptr() in self.tied_params_map
343
+ and self.execution_device not in self.tied_params_map[value.data_ptr()]
344
+ ):
345
+ self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device))
346
+
347
+ set_module_tensor_to_device(
348
+ module,
349
+ name,
350
+ self.execution_device,
351
+ value=value,
352
+ fp16_statistics=fp16_statistics,
353
+ tied_params_map=self.tied_params_map,
354
+ )
355
+
356
+ return send_to_device(args, self.execution_device), send_to_device(
357
+ kwargs, self.execution_device, skip_keys=self.skip_keys
358
+ )
359
+
360
+ def post_forward(self, module, output):
361
+ if self.offload:
362
+ for name, _ in named_module_tensors(
363
+ module,
364
+ include_buffers=self.offload_buffers,
365
+ recurse=self.place_submodules,
366
+ remove_non_persistent=True,
367
+ ):
368
+ set_module_tensor_to_device(module, name, "meta")
369
+ if type(module).__name__ == "Linear8bitLt":
370
+ module.state.SCB = None
371
+ module.state.CxB = None
372
+
373
+ # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from
374
+ # this dictionary to allow the garbage collector to do its job.
375
+ for value_pointer, device in self.tied_pointers_to_remove:
376
+ del self.tied_params_map[value_pointer][device]
377
+ self.tied_pointers_to_remove = set()
378
+
379
+ if self.io_same_device and self.input_device is not None:
380
+ output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
381
+
382
+ return output
383
+
384
+ def detach_hook(self, module):
385
+ if self.offload:
386
+ for name, device in self.original_devices.items():
387
+ if device != torch.device("meta"):
388
+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
389
+ return module
390
+
391
+
392
+ def attach_execution_device_hook(
393
+ module: torch.nn.Module,
394
+ execution_device: Union[int, str, torch.device],
395
+ skip_keys: Optional[Union[str, List[str]]] = None,
396
+ preload_module_classes: Optional[List[str]] = None,
397
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
398
+ ):
399
+ """
400
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
401
+ execution device
402
+
403
+ Args:
404
+ module (`torch.nn.Module`):
405
+ The module where we want to attach the hooks.
406
+ execution_device (`int`, `str` or `torch.device`):
407
+ The device on which inputs and model weights should be placed before the forward pass.
408
+ skip_keys (`str` or `List[str]`, *optional*):
409
+ A list of keys to ignore when moving inputs or outputs between devices.
410
+ preload_module_classes (`List[str]`, *optional*):
411
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
412
+ of the forward. This should only be used for classes that have submodules which are registered but not
413
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
414
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
415
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
416
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
417
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
418
+ instead of duplicating memory.
419
+ """
420
+ if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
421
+ add_hook_to_module(
422
+ module,
423
+ AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map),
424
+ )
425
+
426
+ # Break the recursion if we get to a preload module.
427
+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
428
+ return
429
+
430
+ for child in module.children():
431
+ attach_execution_device_hook(child, execution_device, tied_params_map=tied_params_map)
432
+
433
+
434
+ def attach_align_device_hook(
435
+ module: torch.nn.Module,
436
+ execution_device: Optional[torch.device] = None,
437
+ offload: bool = False,
438
+ weights_map: Optional[Mapping] = None,
439
+ offload_buffers: bool = False,
440
+ module_name: str = "",
441
+ skip_keys: Optional[Union[str, List[str]]] = None,
442
+ preload_module_classes: Optional[List[str]] = None,
443
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
444
+ ):
445
+ """
446
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
447
+ buffers.
448
+
449
+ Args:
450
+ module (`torch.nn.Module`):
451
+ The module where we want to attach the hooks.
452
+ execution_device (`torch.device`, *optional*):
453
+ The device on which inputs and model weights should be placed before the forward pass.
454
+ offload (`bool`, *optional*, defaults to `False`):
455
+ Whether or not the weights should be offloaded after the forward pass.
456
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
457
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
458
+ offload_buffers (`bool`, *optional*, defaults to `False`):
459
+ Whether or not to include the associated module's buffers when offloading.
460
+ module_name (`str`, *optional*, defaults to `""`):
461
+ The name of the module.
462
+ skip_keys (`str` or `List[str]`, *optional*):
463
+ A list of keys to ignore when moving inputs or outputs between devices.
464
+ preload_module_classes (`List[str]`, *optional*):
465
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
466
+ of the forward. This should only be used for classes that have submodules which are registered but not
467
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
468
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
469
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
470
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
471
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
472
+ instead of duplicating memory.
473
+ """
474
+ # Attach the hook on this module if it has any direct tensor.
475
+ directs = named_module_tensors(module)
476
+ full_offload = (
477
+ offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
478
+ )
479
+
480
+ if len(list(directs)) > 0 or full_offload:
481
+ if weights_map is not None:
482
+ prefix = f"{module_name}." if len(module_name) > 0 else ""
483
+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)
484
+ else:
485
+ prefixed_weights_map = None
486
+ hook = AlignDevicesHook(
487
+ execution_device=execution_device,
488
+ offload=offload,
489
+ weights_map=prefixed_weights_map,
490
+ offload_buffers=offload_buffers,
491
+ place_submodules=full_offload,
492
+ skip_keys=skip_keys,
493
+ tied_params_map=tied_params_map,
494
+ )
495
+ add_hook_to_module(module, hook, append=True)
496
+
497
+ # We stop the recursion in case we hit the full offload.
498
+ if full_offload:
499
+ return
500
+
501
+ # Recurse on all children of the module.
502
+ for child_name, child in module.named_children():
503
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
504
+ attach_align_device_hook(
505
+ child,
506
+ execution_device=execution_device,
507
+ offload=offload,
508
+ weights_map=weights_map,
509
+ offload_buffers=offload_buffers,
510
+ module_name=child_name,
511
+ preload_module_classes=preload_module_classes,
512
+ skip_keys=skip_keys,
513
+ tied_params_map=tied_params_map,
514
+ )
515
+
516
+
517
+ def remove_hook_from_submodules(module: nn.Module):
518
+ """
519
+ Recursively removes all hooks attached on the submodules of a given model.
520
+
521
+ Args:
522
+ module (`torch.nn.Module`): The module on which to remove all hooks.
523
+ """
524
+ remove_hook_from_module(module)
525
+ for child in module.children():
526
+ remove_hook_from_submodules(child)
527
+
528
+
529
+ def attach_align_device_hook_on_blocks(
530
+ module: nn.Module,
531
+ execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
532
+ offload: Union[bool, Dict[str, bool]] = False,
533
+ weights_map: Mapping = None,
534
+ offload_buffers: bool = False,
535
+ module_name: str = "",
536
+ skip_keys: Optional[Union[str, List[str]]] = None,
537
+ preload_module_classes: Optional[List[str]] = None,
538
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
539
+ ):
540
+ """
541
+ Attaches `AlignDevicesHook` to all blocks of a given model as needed.
542
+
543
+ Args:
544
+ module (`torch.nn.Module`):
545
+ The module where we want to attach the hooks.
546
+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):
547
+ The device on which inputs and model weights should be placed before the forward pass. It can be one device
548
+ for the whole module, or a dictionary mapping module name to device.
549
+ offload (`bool`, *optional*, defaults to `False`):
550
+ Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole
551
+ module, or a dictionary mapping module name to boolean.
552
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
553
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
554
+ offload_buffers (`bool`, *optional*, defaults to `False`):
555
+ Whether or not to include the associated module's buffers when offloading.
556
+ module_name (`str`, *optional*, defaults to `""`):
557
+ The name of the module.
558
+ skip_keys (`str` or `List[str]`, *optional*):
559
+ A list of keys to ignore when moving inputs or outputs between devices.
560
+ preload_module_classes (`List[str]`, *optional*):
561
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
562
+ of the forward. This should only be used for classes that have submodules which are registered but not
563
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
564
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
565
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
566
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
567
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
568
+ instead of duplicating memory.
569
+ """
570
+ # If one device and one offload, we've got one hook.
571
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):
572
+ if not offload:
573
+ hook = AlignDevicesHook(
574
+ execution_device=execution_device,
575
+ io_same_device=True,
576
+ skip_keys=skip_keys,
577
+ place_submodules=True,
578
+ tied_params_map=tied_params_map,
579
+ )
580
+ add_hook_to_module(module, hook)
581
+ else:
582
+ attach_align_device_hook(
583
+ module,
584
+ execution_device=execution_device,
585
+ offload=True,
586
+ weights_map=weights_map,
587
+ offload_buffers=offload_buffers,
588
+ module_name=module_name,
589
+ skip_keys=skip_keys,
590
+ tied_params_map=tied_params_map,
591
+ )
592
+ return
593
+
594
+ if not isinstance(execution_device, Mapping):
595
+ execution_device = {key: execution_device for key in offload.keys()}
596
+ if not isinstance(offload, Mapping):
597
+ offload = {key: offload for key in execution_device.keys()}
598
+
599
+ if module_name in execution_device and module_name in offload and not offload[module_name]:
600
+ hook = AlignDevicesHook(
601
+ execution_device=execution_device[module_name],
602
+ offload_buffers=offload_buffers,
603
+ io_same_device=(module_name == ""),
604
+ place_submodules=True,
605
+ skip_keys=skip_keys,
606
+ tied_params_map=tied_params_map,
607
+ )
608
+ add_hook_to_module(module, hook)
609
+ attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map)
610
+ elif module_name in execution_device and module_name in offload:
611
+ attach_align_device_hook(
612
+ module,
613
+ execution_device=execution_device[module_name],
614
+ offload=True,
615
+ weights_map=weights_map,
616
+ offload_buffers=offload_buffers,
617
+ module_name=module_name,
618
+ skip_keys=skip_keys,
619
+ preload_module_classes=preload_module_classes,
620
+ tied_params_map=tied_params_map,
621
+ )
622
+ if not hasattr(module, "_hf_hook"):
623
+ hook = AlignDevicesHook(
624
+ execution_device=execution_device[module_name],
625
+ io_same_device=(module_name == ""),
626
+ skip_keys=skip_keys,
627
+ tied_params_map=tied_params_map,
628
+ )
629
+ add_hook_to_module(module, hook)
630
+ attach_execution_device_hook(
631
+ module,
632
+ execution_device[module_name],
633
+ preload_module_classes=preload_module_classes,
634
+ skip_keys=skip_keys,
635
+ tied_params_map=tied_params_map,
636
+ )
637
+ elif module_name == "":
638
+ hook = AlignDevicesHook(
639
+ execution_device=execution_device.get(""),
640
+ io_same_device=True,
641
+ skip_keys=skip_keys,
642
+ tied_params_map=tied_params_map,
643
+ )
644
+ add_hook_to_module(module, hook)
645
+
646
+ for child_name, child in module.named_children():
647
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
648
+ attach_align_device_hook_on_blocks(
649
+ child,
650
+ execution_device=execution_device,
651
+ offload=offload,
652
+ weights_map=weights_map,
653
+ offload_buffers=offload_buffers,
654
+ module_name=child_name,
655
+ preload_module_classes=preload_module_classes,
656
+ skip_keys=skip_keys,
657
+ tied_params_map=tied_params_map,
658
+ )
659
+
660
+
661
+ class CpuOffload(ModelHook):
662
+ """
663
+ Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after
664
+ the forward; the user needs to call the `init_hook` method again for this.
665
+
666
+ Args:
667
+ execution_device(`str`, `int` or `torch.device`, *optional*):
668
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
669
+ GPU 0 if there is a GPU, and finally to the CPU.
670
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
671
+ The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
672
+ passed, its offload method will be called just before the forward of the model to which this hook is
673
+ attached.
674
+ """
675
+
676
+ def __init__(
677
+ self,
678
+ execution_device: Optional[Union[str, int, torch.device]] = None,
679
+ prev_module_hook: Optional["UserCpuOffloadHook"] = None,
680
+ ):
681
+ self.prev_module_hook = prev_module_hook
682
+
683
+ self.execution_device = execution_device if execution_device is not None else PartialState().default_device
684
+
685
+ def init_hook(self, module):
686
+ return module.to("cpu")
687
+
688
+ def pre_forward(self, module, *args, **kwargs):
689
+ if self.prev_module_hook is not None:
690
+ self.prev_module_hook.offload()
691
+ module.to(self.execution_device)
692
+ return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
693
+
694
+
695
+ class UserCpuOffloadHook:
696
+ """
697
+ A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook
698
+ or remove it entirely.
699
+ """
700
+
701
+ def __init__(self, model, hook):
702
+ self.model = model
703
+ self.hook = hook
704
+
705
+ def offload(self):
706
+ self.hook.init_hook(self.model)
707
+
708
+ def remove(self):
709
+ remove_hook_from_module(self.model)
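For illustration, a minimal sketch of how the hooks defined above might be used to offload a toy model: weights sit on `"meta"`/CPU between forwards and are only moved to the execution device for the duration of each forward pass. The toy `nn.Sequential` model and the device choice are assumptions, not part of this file.

```python
import torch
from torch import nn

from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules

model = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Linear(32, 2))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# offload=True makes init_hook build a CPU weights_map and pre_forward reload the
# weights onto the execution device; post_forward sets them back to "meta".
attach_align_device_hook(model, execution_device=device, offload=True)

output = model(torch.randn(4, 8))  # weights are loaded, used, then offloaded again

remove_hook_from_submodules(model)  # detach the hooks and restore the original weights
```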
venv/lib/python3.10/site-packages/accelerate/inference.py ADDED
@@ -0,0 +1,188 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from types import MethodType
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ from .state import PartialState
19
+ from .utils import (
20
+ calculate_maximum_sizes,
21
+ convert_bytes,
22
+ copy_tensor_to_devices,
23
+ ignorant_find_batch_size,
24
+ infer_auto_device_map,
25
+ is_pippy_available,
26
+ pad_input_tensors,
27
+ send_to_device,
28
+ )
29
+
30
+
31
+ if is_pippy_available():
32
+ from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
33
+ from pippy.PipelineStage import PipelineStage
34
+
35
+
36
+ def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
37
+ """
38
+ Calculates the device map for `model` with an offset for PiPPy
39
+ """
40
+ if num_processes == 1:
41
+ return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
42
+ if max_memory is None:
43
+ model_size, shared = calculate_maximum_sizes(model)
44
+
45
+ # Split into `n` chunks for each GPU
46
+ memory = (model_size + shared[0]) / num_processes
47
+ memory = convert_bytes(memory)
48
+ value, ending = memory.split(" ")
49
+
50
+ # Add a chunk to deal with potential extra shared memory instances
51
+ memory = math.ceil(float(value)) * 1.1
52
+ memory = f"{memory} {ending}"
53
+ max_memory = {i: memory for i in range(num_processes)}
54
+ device_map = infer_auto_device_map(
55
+ model,
56
+ max_memory=max_memory,
57
+ no_split_module_classes=no_split_module_classes,
58
+ clean_result=False,
59
+ )
60
+ return device_map
61
+
62
+
63
+ def find_pippy_batch_size(args, kwargs):
64
+ found_batch_size = None
65
+ if args is not None:
66
+ for arg in args:
67
+ found_batch_size = ignorant_find_batch_size(arg)
68
+ if found_batch_size is not None:
69
+ break
70
+ if kwargs is not None and found_batch_size is None:
71
+ for kwarg in kwargs.values():
72
+ found_batch_size = ignorant_find_batch_size(kwarg)
73
+ if found_batch_size is not None:
74
+ break
75
+ return found_batch_size
76
+
77
+
78
+ def build_pipeline(model, split_points, args, kwargs, num_chunks):
79
+ """
80
+ Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing
81
+ in the `args` and `kwargs` the model needs, placed on the CPU.
82
+
83
+ Users can pass in custom `num_chunks` as an optional hyper-parameter. By default will use
84
+ `AcceleratorState.num_processes`
85
+ """
86
+ # We need to annotate the split points in the model for PiPPy
87
+ state = PartialState()
88
+ annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points})
89
+ found_batch_size = find_pippy_batch_size(args, kwargs)
90
+ if found_batch_size != num_chunks:
91
+ if args is not None:
92
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
93
+ if kwargs is not None:
94
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
95
+ pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs)
96
+ stage = PipelineStage(pipe, state.local_process_index, device=state.device)
97
+
98
+ return stage
99
+
100
+
101
+ def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
102
+ state = PartialState()
103
+ output = None
104
+
105
+ if state.num_processes == 1:
106
+ output = forward(*args, **kwargs)
107
+ elif state.is_local_main_process:
108
+ found_batch_size = find_pippy_batch_size(args, kwargs)
109
+ if found_batch_size is None:
110
+ raise ValueError("Could not find batch size from args or kwargs")
111
+ else:
112
+ if found_batch_size != num_chunks:
113
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
114
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
115
+ forward(*args, **kwargs)
116
+ elif state.is_last_process:
117
+ output = forward()
118
+ else:
119
+ forward()
120
+ if gather_output:
121
+ # Each node will get a copy of the full output which is only on the last GPU
122
+ output = copy_tensor_to_devices(output)
123
+ return output
124
+
125
+
126
+ def prepare_pippy(
127
+ model,
128
+ split_points: Optional[Union[str, List[str]]] = "auto",
129
+ no_split_module_classes: Optional[List[str]] = None,
130
+ example_args: Optional[Tuple[Any]] = (),
131
+ example_kwargs: Optional[Dict[str, Any]] = None,
132
+ num_chunks: Optional[int] = None,
133
+ gather_output: Optional[bool] = False,
134
+ ):
135
+ """
136
+ Wraps `model` for pipeline parallel inference.
137
+
138
+ Args:
139
+ model (`torch.nn.Module`):
140
+ A model we want to split for pipeline-parallel inference
141
+ split_points (`str` or `List[str]`, defaults to 'auto'):
142
+ How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced
143
+ split given any model. Should be a list of layer names in the model to split by otherwise.
144
+ no_split_module_classes (`List[str]`):
145
+ A list of class names for layers we don't want to be split.
146
+ example_args (tuple of model inputs):
147
+ The expected inputs for the model that uses order-based inputs. Recommended to use this method if possible.
148
+ example_kwargs (dict of model inputs)
149
+ The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure
150
+ that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition
151
+ is true for all cases.
152
+ num_chunks (`int`, defaults to the number of available GPUs):
153
+ The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but
154
+ this can be tuned and played with. In general one should have num_chunks >= num_gpus.
155
+ gather_output (`bool`, defaults to `False`):
156
+ If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
157
+ """
158
+ if not is_pippy_available():
159
+ raise ImportError(
160
+ "`pippy` was not found to be installed on your system. Please "
161
+ "install using `pip install torchpippy` or ensure you have at least version 0.2.0"
162
+ )
163
+ state = PartialState()
164
+ example_args = send_to_device(example_args, "cpu")
165
+ example_kwargs = send_to_device(example_kwargs, "cpu")
166
+ if num_chunks is None:
167
+ num_chunks = state.num_processes
168
+ if split_points == "auto":
169
+ device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
170
+ split_points = []
171
+ for i in range(1, num_chunks):
172
+ split_points.append(next(k for k, v in device_map.items() if v == i))
173
+ model.hf_split_points = split_points
174
+ stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
175
+ model._original_forward = model.forward
176
+ model._original_call = model.__call__
177
+ model.pippy_stage = stage
178
+ model.hf_split_points = split_points
179
+
180
+ def forward(*args, **kwargs):
181
+ return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs)
182
+
183
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
184
+ # Note: creates an infinite recursion loop with `generate`
185
+ model_forward = MethodType(forward, model)
186
+ forward.__wrapped__ = model_forward
187
+ model.forward = forward
188
+ return model
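A hedged sketch of how `prepare_pippy` is typically driven, following the pattern of the accelerate distributed-inference examples. It assumes `torchpippy` is installed and that the script is started with `accelerate launch` on at least two GPUs; the toy model and input shapes are placeholders, not part of this file.

```python
import torch
from torch import nn

from accelerate import PartialState
from accelerate.inference import prepare_pippy

model = nn.Sequential(*[nn.Linear(64, 64) for _ in range(8)]).eval()

# Example inputs stay on the CPU: they are only used to trace and split the model.
example_input = torch.randn(2, 64)

model = prepare_pippy(model, split_points="auto", example_args=(example_input,), gather_output=True)

with torch.no_grad():
    # Real inputs are placed on the local device; with gather_output=True every rank
    # receives a copy of the final output.
    output = model(example_input.to(PartialState().device))
```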
venv/lib/python3.10/site-packages/accelerate/launchers.py ADDED
@@ -0,0 +1,258 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import sys
17
+ import tempfile
18
+
19
+ import torch
20
+
21
+ from .state import AcceleratorState, PartialState
22
+ from .utils import (
23
+ PrecisionType,
24
+ PrepareForLaunch,
25
+ are_libraries_initialized,
26
+ check_cuda_p2p_ib_support,
27
+ get_gpu_info,
28
+ is_mps_available,
29
+ patch_environment,
30
+ )
31
+
32
+
33
+ def test_launch():
34
+ "Verify a `PartialState` can be initialized."
35
+ _ = PartialState()
36
+
37
+
38
+ def notebook_launcher(
39
+ function,
40
+ args=(),
41
+ num_processes=None,
42
+ mixed_precision="no",
43
+ use_port="29500",
44
+ master_addr="127.0.0.1",
45
+ node_rank=0,
46
+ num_nodes=1,
47
+ ):
48
+ """
49
+ Launches a training function, using several processes or multiple nodes if it's possible in the current environment
50
+ (TPU with multiple cores for instance).
51
+
52
+ <Tip warning={true}>
53
+
54
+ To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If
55
+ any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability.
56
+
57
+ Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none
58
+ of those calls have been made.
59
+
60
+ </Tip>
61
+
62
+ Args:
63
+ function (`Callable`):
64
+ The training function to execute. If it accepts arguments, the first argument should be the index of the
65
+ process run.
66
+ args (`Tuple`):
67
+ Tuple of arguments to pass to the function (it will receive `*args`).
68
+ num_processes (`int`, *optional*):
69
+ The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to
70
+ the number of GPUs available otherwise.
71
+ mixed_precision (`str`, *optional*, defaults to `"no"`):
72
+ If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
73
+ use_port (`str`, *optional*, defaults to `"29500"`):
74
+ The port to use to communicate between processes when launching a multi-GPU training.
75
+ master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
76
+ The address to use for communication between processes.
77
+ node_rank (`int`, *optional*, defaults to 0):
78
+ The rank of the current node.
79
+ num_nodes (`int`, *optional*, defaults to 1):
80
+ The number of nodes to use for training.
81
+
82
+ Example:
83
+
84
+ ```python
85
+ # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
86
+ from accelerate import notebook_launcher
87
+
88
+
89
+ def train(*args):
90
+ # Your training function here
91
+ ...
92
+
93
+
94
+ notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
95
+ ```
96
+ """
97
+ # Are we in a google colab or a Kaggle Kernel?
98
+ in_colab = False
99
+ in_kaggle = False
100
+ if any(key.startswith("KAGGLE") for key in os.environ.keys()):
101
+ in_kaggle = True
102
+ elif "IPython" in sys.modules:
103
+ in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
104
+
105
+ try:
106
+ mixed_precision = PrecisionType(mixed_precision.lower())
107
+ except ValueError:
108
+ raise ValueError(
109
+ f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
110
+ )
111
+
112
+ if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
113
+ # TPU launch
114
+ import torch_xla.distributed.xla_multiprocessing as xmp
115
+
116
+ if len(AcceleratorState._shared_state) > 0:
117
+ raise ValueError(
118
+ "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
119
+ "your training function. Restart your notebook and make sure no cells initializes an "
120
+ "`Accelerator`."
121
+ )
122
+ if num_processes is None:
123
+ num_processes = 8
124
+
125
+ launcher = PrepareForLaunch(function, distributed_type="TPU")
126
+ print(f"Launching a training on {num_processes} TPU cores.")
127
+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
128
+ elif in_colab and get_gpu_info()[1] < 2:
129
+ # No need for a distributed launch otherwise as it's either CPU or one GPU.
130
+ if torch.cuda.is_available():
131
+ print("Launching training on one GPU.")
132
+ else:
133
+ print("Launching training on one CPU.")
134
+ function(*args)
135
+ else:
136
+ if num_processes is None:
137
+ raise ValueError(
138
+ "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
139
+ )
140
+ if node_rank >= num_nodes:
141
+ raise ValueError("The node_rank must be less than the number of nodes.")
142
+ if num_processes > 1:
143
+ # Multi-GPU launch
144
+ from torch.multiprocessing import start_processes
145
+ from torch.multiprocessing.spawn import ProcessRaisedException
146
+
147
+ if len(AcceleratorState._shared_state) > 0:
148
+ raise ValueError(
149
+ "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
150
+ "inside your training function. Restart your notebook and make sure no cells initializes an "
151
+ "`Accelerator`."
152
+ )
153
+ # Check for specific libraries known to initialize CUDA that users constantly use
154
+ problematic_imports = are_libraries_initialized("bitsandbytes")
155
+ if len(problematic_imports) > 0:
156
+ err = (
157
+ "Could not start distributed process. Libraries known to initialize CUDA upon import have been "
158
+ "imported already. Please keep these imports inside your training function to try and help with this:"
159
+ )
160
+ for lib_name in problematic_imports:
161
+ err += f"\n\t* `{lib_name}`"
162
+ raise RuntimeError(err)
163
+
164
+ patched_env = dict(
165
+ nproc=num_processes,
166
+ node_rank=node_rank,
167
+ world_size=num_nodes * num_processes,
168
+ master_addr=master_addr,
169
+ master_port=use_port,
170
+ mixed_precision=mixed_precision,
171
+ )
172
+
173
+ # Check for CUDA P2P and IB issues
174
+ if not check_cuda_p2p_ib_support():
175
+ patched_env["nccl_p2p_disable"] = "1"
176
+ patched_env["nccl_ib_disable"] = "1"
177
+
178
+ # torch.distributed will expect a few environment variables to be here. We set the ones common to each
179
+ # process here (the other ones will be set by the launcher).
180
+ with patch_environment(**patched_env):
181
+ # First dummy launch
182
+ if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":
183
+ launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU")
184
+ try:
185
+ start_processes(launcher, args=(), nprocs=num_processes, start_method="fork")
186
+ except ProcessRaisedException as e:
187
+ err = "An issue was found when verifying a stable environment for the notebook launcher."
188
+ if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
189
+ raise RuntimeError(
190
+ f"{err}"
191
+ "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
192
+ "Please review your imports and test them when running the `notebook_launcher()` to identify "
193
+ "which one is problematic and causing CUDA to be initialized."
194
+ ) from e
195
+ else:
196
+ raise RuntimeError(f"{err} The following error was raised: {e}") from e
197
+ # Now the actual launch
198
+ launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
199
+ print(f"Launching training on {num_processes} GPUs.")
200
+ try:
201
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
202
+ except ProcessRaisedException as e:
203
+ if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
204
+ raise RuntimeError(
205
+ "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
206
+ "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
207
+ "Please review your imports and test them when running the `notebook_launcher()` to identify "
208
+ "which one is problematic and causing CUDA to be initialized."
209
+ ) from e
210
+ else:
211
+ raise RuntimeError(f"An issue was found when launching the training: {e}") from e
212
+
213
+ else:
214
+ # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
215
+ if is_mps_available():
216
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
217
+ print("Launching training on MPS.")
218
+ elif torch.cuda.is_available():
219
+ print("Launching training on one GPU.")
220
+ else:
221
+ print("Launching training on CPU.")
222
+ function(*args)
223
+
224
+
225
+ def debug_launcher(function, args=(), num_processes=2):
226
+ """
227
+ Launches a training function using several processes on CPU for debugging purposes.
228
+
229
+ <Tip warning={true}>
230
+
231
+ This function is provided for internal testing and debugging, but it's not intended for real trainings. It will
232
+ only use the CPU.
233
+
234
+ </Tip>
235
+
236
+ Args:
237
+ function (`Callable`):
238
+ The training function to execute.
239
+ args (`Tuple`):
240
+ Tuple of arguments to pass to the function (it will receive `*args`).
241
+ num_processes (`int`, *optional*, defaults to 2):
242
+ The number of processes to use for training.
243
+ """
244
+ from torch.multiprocessing import start_processes
245
+
246
+ with tempfile.NamedTemporaryFile() as tmp_file:
247
+ # torch.distributed will expect a few environment variables to be here. We set the ones common to each
248
+ # process here (the other ones will be set by the launcher).
249
+ with patch_environment(
250
+ world_size=num_processes,
251
+ master_addr="127.0.0.1",
252
+ master_port="29500",
253
+ accelerate_mixed_precision="no",
254
+ accelerate_debug_rdv_file=tmp_file.name,
255
+ accelerate_use_cpu="yes",
256
+ ):
257
+ launcher = PrepareForLaunch(function, debug=True)
258
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
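A small sketch of `debug_launcher`, which the docstrings above do not illustrate: it spawns CPU-only worker processes, which is handy for exercising distributed code paths without any GPU. The worker body is an assumption.

```python
from accelerate import Accelerator
from accelerate.launchers import debug_launcher


def _worker():
    accelerator = Accelerator()  # forced onto CPU by debug_launcher's environment patch
    accelerator.print(f"hello from process {accelerator.process_index} of {accelerator.num_processes}")


if __name__ == "__main__":
    debug_launcher(_worker, num_processes=2)
```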
venv/lib/python3.10/site-packages/accelerate/local_sgd.py ADDED
@@ -0,0 +1,102 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+
16
+ from accelerate import Accelerator, DistributedType
17
+
18
+
19
+ class LocalSGD:
20
+ """
21
+ A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently
22
+ on each device, and averages model weights every K synchronization step.
23
+
24
+ It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular,
25
+ this is a simple implementation that cannot support scenarios such as model parallelism.
26
+
27
+
28
+ Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
29
+ back to at least:
30
+
31
+ Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
32
+ arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)
33
+
34
+ We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).
35
+
36
+ Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
37
+ Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
38
+
39
+ """
40
+
41
+ def __enter__(self):
42
+ if self.enabled:
43
+ self.model_sync_obj = self.model.no_sync()
44
+ self.model_sync_obj.__enter__()
45
+
46
+ return self
47
+
48
+ def __exit__(self, type, value, tb):
49
+ if self.enabled:
50
+ # Average all models on exit
51
+ self._sync_and_avg_model_params()
52
+ self.model_sync_obj.__exit__(type, value, tb)
53
+
54
+ def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
55
+ """
56
+ Constructor.
57
+
58
+ Args:
59
+ model (`torch.nn.Module`):
60
+ The model whose parameters we need to average.
61
+ accelerator (`Accelerator`):
62
+ Accelerator object.
63
+ local_sgd_steps (`int`):
64
+ A number of local SGD steps (before model parameters are synchronized).
65
+ enabled (`bool`):
66
+ Local SGD is disabled if this parameter is set to `False`.
67
+ """
68
+ if accelerator.distributed_type not in [
69
+ DistributedType.NO,
70
+ DistributedType.MULTI_CPU,
71
+ DistributedType.MULTI_GPU,
72
+ DistributedType.MULTI_MLU,
73
+ DistributedType.MULTI_NPU,
74
+ ]:
75
+ raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
76
+ self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
77
+ self.num_steps = 0
78
+ if self.enabled:
79
+ self.accelerator = accelerator
80
+ self.model = model
81
+ self.local_sgd_steps = local_sgd_steps
82
+
83
+ def step(self):
84
+ """
85
+ This function makes a "step" and synchronizes model parameters if necessary.
86
+ """
87
+ self.num_steps += 1
88
+ if not self.enabled:
89
+ return
90
+
91
+ if self.num_steps % self.local_sgd_steps == 0:
92
+ self._sync_and_avg_model_params()
93
+
94
+ def _sync_and_avg_model_params(self):
95
+ """
96
+ Synchronize + Average model parameters across all GPUs
97
+ """
98
+
99
+ self.accelerator.wait_for_everyone()
100
+ with self.accelerator.autocast():
101
+ for param in self.model.parameters():
102
+ param.data = self.accelerator.reduce(param.data, reduction="mean")
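A minimal, self-contained training-loop sketch for `LocalSGD`, mirroring the usage the class is designed for; the toy model, data, and `local_sgd_steps=4` are illustrative assumptions.

```python
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(64, 4), torch.randn(64, 1)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=4, enabled=True) as local_sgd:
    for x, y in dataloader:
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        local_sgd.step()  # every 4 steps, parameters are averaged across processes
```

Under the hood the context manager enters `model.no_sync()` on multi-GPU, so gradients are not all-reduced every step; only the periodic parameter averaging communicates.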
venv/lib/python3.10/site-packages/accelerate/logging.py ADDED
@@ -0,0 +1,123 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ import logging
17
+ import os
18
+
19
+ from .state import PartialState
20
+
21
+
22
+ class MultiProcessAdapter(logging.LoggerAdapter):
23
+ """
24
+ An adapter to assist with logging in multiprocess.
25
+
26
+ `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
27
+ or only the main executed one. Default is `main_process_only=True`.
28
+
29
+ Does not require an `Accelerator` object to be created first.
30
+ """
31
+
32
+ @staticmethod
33
+ def _should_log(main_process_only):
34
+ "Check if log should be performed"
35
+ state = PartialState()
36
+ return not main_process_only or (main_process_only and state.is_main_process)
37
+
38
+ def log(self, level, msg, *args, **kwargs):
39
+ """
40
+ Delegates logger call after checking if we should log.
41
+
42
+ Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
43
+ or only the main executed one. Default is `True` if not passed
44
+
45
+ Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
46
+ read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
47
+ break with the previous behavior.
48
+
49
+ `in_order` is ignored if `main_process_only` is passed.
50
+ """
51
+ if PartialState._shared_state == {}:
52
+ raise RuntimeError(
53
+ "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
54
+ )
55
+ main_process_only = kwargs.pop("main_process_only", True)
56
+ in_order = kwargs.pop("in_order", False)
57
+
58
+ if self.isEnabledFor(level):
59
+ if self._should_log(main_process_only):
60
+ msg, kwargs = self.process(msg, kwargs)
61
+ self.logger.log(level, msg, *args, **kwargs)
62
+
63
+ elif in_order:
64
+ state = PartialState()
65
+ for i in range(state.num_processes):
66
+ if i == state.process_index:
67
+ msg, kwargs = self.process(msg, kwargs)
68
+ self.logger.log(level, msg, *args, **kwargs)
69
+ state.wait_for_everyone()
70
+
71
+ @functools.lru_cache(None)
72
+ def warning_once(self, *args, **kwargs):
73
+ """
74
+ This method is identical to `logger.warning()`, but will emit the warning with the same message only once
75
+
76
+ Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
77
+ cache. The assumption here is that all warning messages are unique across the code. If they aren't, you need to
78
+ switch to another type of cache that includes the caller frame information in the hashing function.
79
+ """
80
+ self.warning(*args, **kwargs)
81
+
82
+
83
+ def get_logger(name: str, log_level: str = None):
84
+ """
85
+ Returns a `logging.Logger` for `name` that can handle multiprocessing.
86
+
87
+ If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all
88
+ processes and in order, also pass `in_order=True`
89
+
90
+ Args:
91
+ name (`str`):
92
+ The name for the logger, such as `__file__`
93
+ log_level (`str`, *optional*):
94
+ The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not
95
+
96
+ Example:
97
+
98
+ ```python
99
+ >>> from accelerate.logging import get_logger
100
+ >>> from accelerate import Accelerator
101
+
102
+ >>> logger = get_logger(__name__)
103
+
104
+ >>> accelerator = Accelerator()
105
+ >>> logger.info("My log", main_process_only=False)
106
+ >>> logger.debug("My log", main_process_only=True)
107
+
108
+ >>> logger = get_logger(__name__, log_level="DEBUG")
109
+ >>> logger.info("My log")
110
+ >>> logger.debug("My second log")
111
+
112
+ >>> array = ["a", "b", "c", "d"]
113
+ >>> letter_at_rank = array[accelerator.process_index]
114
+ >>> logger.info(letter_at_rank, in_order=True)
115
+ ```
116
+ """
117
+ if log_level is None:
118
+ log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
119
+ logger = logging.getLogger(name)
120
+ if log_level is not None:
121
+ logger.setLevel(log_level.upper())
122
+ logger.root.setLevel(log_level.upper())
123
+ return MultiProcessAdapter(logger, {})
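A short sketch of `warning_once`, which the docstring example above does not cover: the message is emitted a single time per unique set of arguments, regardless of how often the call site runs. The messages themselves are placeholders.

```python
from accelerate import Accelerator
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
accelerator = Accelerator()  # the accelerate state must exist before any log call

for _ in range(3):
    logger.warning_once("gradient checkpointing is not enabled")  # printed once in total
    logger.info("finished a step")  # printed on the main process only (the default)
```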
venv/lib/python3.10/site-packages/accelerate/memory_utils.py ADDED
@@ -0,0 +1,22 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+
17
+
18
+ warnings.warn(
19
+ "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
20
+ "`from accelerate import find_executable_batch_size` to avoid this warning.",
21
+ FutureWarning,
22
+ )
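A sketch of the replacement import the warning above points to: `find_executable_batch_size` retries the decorated function with a halved batch size whenever it raises a CUDA out-of-memory error. The training body here is a placeholder.

```python
from accelerate import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"attempting to train with batch_size={batch_size}")
    # ... build the dataloaders and run the training loop with `batch_size` here ...


train()  # called without arguments; the decorator injects `batch_size`
```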
venv/lib/python3.10/site-packages/accelerate/optimizer.py ADDED
@@ -0,0 +1,193 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ import warnings
17
+
18
+ import torch
19
+
20
+ from .state import AcceleratorState, GradientState
21
+ from .utils import DistributedType, honor_type, is_torch_xla_available
22
+
23
+
24
+ if is_torch_xla_available():
25
+ import torch_xla.core.xla_model as xm
26
+
27
+
28
+ def move_to_device(state, device):
29
+ if isinstance(state, (list, tuple)):
30
+ return honor_type(state, (move_to_device(t, device) for t in state))
31
+ elif isinstance(state, dict):
32
+ return type(state)({k: move_to_device(v, device) for k, v in state.items()})
33
+ elif isinstance(state, torch.Tensor):
34
+ return state.to(device)
35
+ return state
36
+
37
+
38
+ class AcceleratedOptimizer(torch.optim.Optimizer):
39
+ """
40
+ Internal wrapper around a torch optimizer.
41
+
42
+ Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
43
+ accumulation.
44
+
45
+ Args:
46
+ optimizer (`torch.optim.optimizer.Optimizer`):
47
+ The optimizer to wrap.
48
+ device_placement (`bool`, *optional*, defaults to `True`):
49
+ Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
50
+ `optimizer` on the right device.
51
+ scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
52
+ The scaler to use in the step function if training with mixed precision.
53
+ """
54
+
55
+ def __init__(self, optimizer, device_placement=True, scaler=None):
56
+ self.optimizer = optimizer
57
+ self.scaler = scaler
58
+ self.accelerator_state = AcceleratorState()
59
+ self.gradient_state = GradientState()
60
+ self.device_placement = device_placement
61
+ self._is_overflow = False
62
+
63
+ if self.scaler is not None:
64
+ self._accelerate_step_called = False
65
+ self._optimizer_original_step_method = self.optimizer.step
66
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
67
+
68
+ # Handle device placement
69
+ if device_placement:
70
+ state_dict = self.optimizer.state_dict()
71
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
72
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
73
+ else:
74
+ state_dict = move_to_device(state_dict, self.accelerator_state.device)
75
+ self.optimizer.load_state_dict(state_dict)
76
+
77
+ @property
78
+ def state(self):
79
+ return self.optimizer.state
80
+
81
+ @state.setter
82
+ def state(self, state):
83
+ self.optimizer.state = state
84
+
85
+ @property
86
+ def param_groups(self):
87
+ return self.optimizer.param_groups
88
+
89
+ @param_groups.setter
90
+ def param_groups(self, param_groups):
91
+ self.optimizer.param_groups = param_groups
92
+
93
+ @property
94
+ def defaults(self):
95
+ return self.optimizer.defaults
96
+
97
+ @defaults.setter
98
+ def defaults(self, defaults):
99
+ self.optimizer.defaults = defaults
100
+
101
+ def add_param_group(self, param_group):
102
+ self.optimizer.add_param_group(param_group)
103
+
104
+ def load_state_dict(self, state_dict):
105
+ if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement:
106
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
107
+ self.optimizer.load_state_dict(state_dict)
108
+
109
+ def state_dict(self):
110
+ return self.optimizer.state_dict()
111
+
112
+ def zero_grad(self, set_to_none=None):
113
+ if self.gradient_state.sync_gradients:
114
+ accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
115
+ if accept_arg:
116
+ if set_to_none is None:
117
+ set_to_none = True
118
+ self.optimizer.zero_grad(set_to_none=set_to_none)
119
+ else:
120
+ if set_to_none is not None:
121
+ raise ValueError("`set_to_none` for `Optimizer.zero_grad` is not supported by this optimizer.")
122
+ self.optimizer.zero_grad()
123
+
124
+ def step(self, closure=None):
125
+ if (
126
+ not self.gradient_state.is_xla_gradients_synced
127
+ and self.accelerator_state.distributed_type == DistributedType.XLA
128
+ ):
129
+ gradients = xm._fetch_gradients(self.optimizer)
130
+ xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
131
+ self.gradient_state.is_xla_gradients_synced = True
132
+ if self.gradient_state.sync_gradients:
133
+ if self.scaler is not None:
134
+ self.optimizer.step = self._optimizer_patched_step_method
135
+
136
+ self.scaler.step(self.optimizer, closure)
137
+ self.scaler.update()
138
+
139
+ if not self._accelerate_step_called:
140
+ # If the optimizer step was skipped, gradient overflow was detected.
141
+ self._is_overflow = True
142
+ else:
143
+ self._is_overflow = False
144
+ # Reset the step method to the original one
145
+ self.optimizer.step = self._optimizer_original_step_method
146
+ # Reset the indicator
147
+ self._accelerate_step_called = False
148
+ else:
149
+ self.optimizer.step(closure)
150
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
151
+ self.gradient_state.is_xla_gradients_synced = False
152
+
153
+ def _switch_parameters(self, parameters_map):
154
+ for param_group in self.optimizer.param_groups:
155
+ param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
156
+
157
+ @property
158
+ def is_overflow(self):
159
+ """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
160
+ warnings.warn(
161
+ "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate. Use "
162
+ "`optimizer.step_was_skipped` instead.",
163
+ FutureWarning,
164
+ )
165
+ return self._is_overflow
166
+
167
+ @property
168
+ def step_was_skipped(self):
169
+ """Whether or not the optimizer step was skipped."""
170
+ return self._is_overflow
171
+
172
+ def __getstate__(self):
173
+ _ignored_keys = [
174
+ "_accelerate_step_called",
175
+ "_optimizer_original_step_method",
176
+ "_optimizer_patched_step_method",
177
+ ]
178
+ return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
179
+
180
+ def __setstate__(self, state):
181
+ self.__dict__.update(state)
182
+ if self.scaler is not None:
183
+ self._accelerate_step_called = False
184
+ self._optimizer_original_step_method = self.optimizer.step
185
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
186
+
187
+
188
+ def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
189
+ def patched_step(*args, **kwargs):
190
+ accelerated_optimizer._accelerate_step_called = True
191
+ return method(*args, **kwargs)
192
+
193
+ return patched_step
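`AcceleratedOptimizer` is normally obtained through `accelerator.prepare`, not constructed directly. A small sketch, assuming a toy model and data, of checking `step_was_skipped` after a step under mixed precision (the scaler skips the step when gradients overflow):

```python
import torch
from torch import nn

from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="fp16" if torch.cuda.is_available() else "no")
model = nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)  # optimizer is now an AcceleratedOptimizer

x = torch.randn(8, 4, device=accelerator.device)
y = torch.randn(8, 1, device=accelerator.device)
loss = nn.functional.mse_loss(model(x), y)
accelerator.backward(loss)
optimizer.step()
if optimizer.step_was_skipped:  # True when the GradScaler detected an overflow
    print("optimizer step skipped, gradients overflowed")
optimizer.zero_grad()
```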
venv/lib/python3.10/site-packages/accelerate/scheduler.py ADDED
@@ -0,0 +1,98 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
16
+
17
+ import warnings
18
+
19
+ from .state import AcceleratorState, GradientState
20
+
21
+
22
+ warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
23
+
24
+
25
+ class AcceleratedScheduler:
26
+ """
27
+ A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
28
+ to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed
29
+ precision training)
30
+
31
+ When performing gradient accumulation scheduler lengths should not be changed accordingly, Accelerate will always
32
+ step the scheduler to account for it.
33
+
34
+ Args:
35
+ scheduler (`torch.optim.lr_scheduler._LRScheduler`):
36
+ The scheduler to wrap.
37
+ optimizers (one or a list of `torch.optim.Optimizer`):
38
+ The optimizers used.
39
+ step_with_optimizer (`bool`, *optional*, defaults to `True`):
40
+ Whether or not the scheduler should be stepped at each optimizer step.
41
+ split_batches (`bool`, *optional*, defaults to `False`):
42
+ Whether or not the dataloaders split one batch across the different processes (so batch size is the same
43
+ regardless of the number of processes) or create batches on each process (so batch size is the original
44
+ batch size multiplied by the number of processes).
45
+ """
46
+
47
+ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
48
+ self.scheduler = scheduler
49
+ self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
50
+ self.split_batches = split_batches
51
+ self.step_with_optimizer = step_with_optimizer
52
+ self.gradient_state = GradientState()
53
+
54
+ def step(self, *args, **kwargs):
55
+ if not self.step_with_optimizer:
56
+ # No link between scheduler and optimizer -> just step
57
+ self.scheduler.step(*args, **kwargs)
58
+ return
59
+
60
+ # Otherwise, first make sure the optimizer was stepped.
61
+ if not self.gradient_state.sync_gradients:
62
+ if self.gradient_state.adjust_scheduler:
63
+ self.scheduler._step_count += 1
64
+ return
65
+
66
+ for opt in self.optimizers:
67
+ if opt.step_was_skipped:
68
+ return
69
+ if self.split_batches:
70
+ # Split batches -> the training dataloader batch size is not changed so one step per training step
71
+ self.scheduler.step(*args, **kwargs)
72
+ else:
73
+ # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
74
+ # num_processes steps per training step
75
+ num_processes = AcceleratorState().num_processes
76
+ for _ in range(num_processes):
77
+ # Special case when using OneCycle and `drop_last` was not used
78
+ if hasattr(self.scheduler, "total_steps"):
79
+ if self.scheduler._step_count <= self.scheduler.total_steps:
80
+ self.scheduler.step(*args, **kwargs)
81
+ else:
82
+ self.scheduler.step(*args, **kwargs)
83
+
84
+ # Passthroughs
85
+ def get_last_lr(self):
86
+ return self.scheduler.get_last_lr()
87
+
88
+ def state_dict(self):
89
+ return self.scheduler.state_dict()
90
+
91
+ def load_state_dict(self, state_dict):
92
+ self.scheduler.load_state_dict(state_dict)
93
+
94
+ def get_lr(self):
95
+ return self.scheduler.get_lr()
96
+
97
+ def print_lr(self, *args, **kwargs):
98
+ return self.scheduler.print_lr(*args, **kwargs)
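In normal use this wrapper is created by `Accelerator.prepare` rather than instantiated directly. A minimal usage sketch, assuming the standard `accelerate` workflow (the model and hyperparameters below are placeholders):

```python
import torch
from accelerate import Accelerator

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

accelerator = Accelerator()
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# `scheduler` is now an AcceleratedScheduler: calling scheduler.step() only
# advances the underlying scheduler when the optimizer performed a real step,
# and it accounts for the number of processes when batches are not split.
```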
venv/lib/python3.10/site-packages/accelerate/state.py ADDED
@@ -0,0 +1,1209 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import logging
18
+ import math
19
+ import os
20
+ import threading
21
+ import warnings
22
+ from contextlib import contextmanager
23
+ from functools import partial
24
+ from typing import Any, Callable, Optional
25
+
26
+ import torch
27
+
28
+ from .utils import (
29
+ DistributedType,
30
+ DynamoBackend,
31
+ GradientAccumulationPlugin,
32
+ check_cuda_p2p_ib_support,
33
+ check_fp8_capability,
34
+ get_ccl_version,
35
+ get_cpu_distributed_information,
36
+ get_int_from_env,
37
+ is_ccl_available,
38
+ is_datasets_available,
39
+ is_deepspeed_available,
40
+ is_fp8_available,
41
+ is_ipex_available,
42
+ is_mlu_available,
43
+ is_mps_available,
44
+ is_npu_available,
45
+ is_torch_xla_available,
46
+ is_xpu_available,
47
+ parse_choice_from_env,
48
+ parse_flag_from_env,
49
+ set_numa_affinity,
50
+ )
51
+ from .utils.dataclasses import SageMakerDistributedType
52
+
53
+
54
+ if is_torch_xla_available():
55
+ import torch_xla.core.xla_model as xm
56
+
57
+ if is_mlu_available(check_device=False):
58
+ import torch_mlu # noqa: F401
59
+
60
+ if is_npu_available(check_device=False):
61
+ import torch_npu # noqa: F401
62
+
63
+ logger = logging.getLogger(__name__)
64
+
65
+
66
+ def is_initialized() -> bool:
67
+ """
68
+ Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
69
+ but works as a module method.
70
+ """
71
+ return AcceleratorState._shared_state != {}
72
+
73
+
74
+ # Lambda function that does nothing
75
+ def do_nothing(*args, **kwargs):
76
+ return None
77
+
78
+
79
+ class ThreadLocalSharedDict(threading.local):
80
+ """
81
+ Descriptor that holds a dict shared between instances of a class in the same thread.
82
+
83
+ Note: Descriptors have slightly different semantics than just a dict field on its own.
84
+ `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
85
+ underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
86
+ the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
87
+ object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
88
+
89
+ See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
90
+
91
+ This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
92
+
93
+ See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
94
+ """
95
+
96
+ def __init__(self, thread_local: bool = False):
97
+ self._storage = {}
98
+
99
+ def __get__(self, obj, objtype=None):
100
+ return self._storage
101
+
102
+ def __set__(self, obj, value):
103
+ self._storage = value
104
+
105
+
106
+ # Prefer global shared dictionary, except when using TPU.
107
+ SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
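The descriptor semantics described in the docstring above can be illustrated with a small standalone sketch. The `State` class below is a hypothetical stand-in, not accelerate code: instance access and class access both go through `__get__`, and the storage must be mutated in place rather than rebound on the class.

```python
from accelerate.state import ThreadLocalSharedDict

class State:  # hypothetical stand-in, not accelerate code
    _shared_state = ThreadLocalSharedDict()

a, b = State(), State()
a._shared_state["rank"] = 0
assert b._shared_state["rank"] == 0      # instances share the same storage
assert State._shared_state["rank"] == 0  # class access goes through __get__ too
State._shared_state.clear()              # mutate in place; `State._shared_state = {}`
                                         # would replace the descriptor with a plain dict
```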
108
+
109
+
110
+ # Inspired by Alex Martelli's 'Borg'.
111
+ class PartialState:
112
+ """
113
+ Singleton class that has information about the current training environment and functions to help with process
114
+ control. Designed to be used when only process control and device execution states are needed. Does *not* need to
115
+ be initialized from `Accelerator`.
116
+
117
+ Args:
118
+ cpu (`bool`, *optional*):
119
+ Whether or not to force the script to execute on CPU. Will ignore any accelerators available if set to
120
+ `True` and force the execution on the CPU.
121
+ kwargs (additional keyword arguments, *optional*):
122
+ Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
123
+ found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.
124
+
125
+ **Available attributes:**
126
+
127
+ - **device** (`torch.device`) -- The device to use.
128
+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
129
+ in use.
130
+ - **local_process_index** (`int`) -- The index of the current process on the current server.
131
+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
132
+ of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16' or 'fp8').
133
+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.
134
+ - **process_index** (`int`) -- The index of the current process.
135
+ - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
136
+ - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
137
+ - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
138
+ - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
139
+
140
+ Example:
141
+ ```python
142
+ from accelerate.utils import InitProcessGroupKwargs
143
+
144
+ # To include `InitProcessGroupKwargs`, init then call `.to_kwargs()`
145
+ kwargs = InitProcessGroupKwargs(...).to_kwargs()
146
+ state = PartialState(**kwargs)
147
+ ```
148
+ """
149
+
150
+ _shared_state = SharedDict()
151
+ _known_attrs = [
152
+ "_cpu",
153
+ "_mixed_precision",
154
+ "_shared_state",
155
+ "backend",
156
+ "debug",
157
+ "device",
158
+ "distributed_type",
159
+ "fork_launched",
160
+ "local_process_index",
161
+ "num_processes",
162
+ "process_index",
163
+ ]
164
+
165
+ def __init__(self, cpu: bool = False, **kwargs):
166
+ self.__dict__ = self._shared_state
167
+ if not self.initialized:
168
+ self._cpu = cpu
169
+ self.backend = None
170
+ env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
171
+ self.device = torch.device(env_device) if env_device is not None else None
172
+ self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
173
+ use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
174
+ dist_information = None
175
+ if use_sagemaker_dp is None:
176
+ use_sagemaker_dp = (
177
+ os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
178
+ and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
179
+ )
180
+
181
+ # Sets up self.backend + imports
182
+ original_backend = kwargs.pop("backend", None)
183
+ backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, original_backend)
184
+ if original_backend is not None and backend != original_backend:
185
+ raise ValueError("Your assigned backend {original_backend} is not avaliable, please use {backend}")
186
+ self.backend = backend
187
+ self.distributed_type = distributed_type
188
+ use_deepspeed = False
189
+ if not cpu and self.backend != "xla":
190
+ if int(os.environ.get("LOCAL_RANK", -1)) != -1:
191
+ # Deal with spawning deepspeed
192
+ if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true":
193
+ if not is_deepspeed_available():
194
+ raise ImportError(
195
+ "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
196
+ )
197
+ from deepspeed import comm as dist
198
+
199
+ if is_xpu_available() and is_ccl_available():
200
+ os.environ["CCL_PROCESS_LAUNCHER"] = "none"
201
+ os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
202
+ os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
203
+
204
+ if not dist.is_initialized():
205
+ dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
206
+ # We need to flag `use_deepspeed` as True to override `distributed_type` later
207
+ use_deepspeed = True
208
+ # Deal with all other backends but XPU and CPU, which get handled specially later
209
+ elif (
210
+ self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU)
211
+ and not torch.distributed.is_initialized()
212
+ ):
213
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
214
+ # XPU and CPU require special env configs to be set
215
+ if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
216
+ dist_information = get_cpu_distributed_information()
217
+ os.environ["RANK"] = str(dist_information.rank)
218
+ os.environ["WORLD_SIZE"] = str(dist_information.world_size)
219
+ os.environ["LOCAL_RANK"] = str(dist_information.local_rank)
220
+ os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size)
221
+ if self.backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU:
222
+ os.environ["CCL_PROCESS_LAUNCHER"] = "none"
223
+ os.environ["CCL_LOCAL_SIZE"] = os.environ["LOCAL_WORLD_SIZE"]
224
+ os.environ["CCL_LOCAL_RANK"] = os.environ["LOCAL_RANK"]
225
+ if not os.environ.get("MASTER_PORT", None):
226
+ os.environ["MASTER_PORT"] = "29500"
227
+ if (
228
+ not os.environ.get("MASTER_ADDR", None)
229
+ and dist_information.local_world_size != dist_information.world_size
230
+ and self.backend != "mpi"
231
+ ):
232
+ raise ValueError(
233
+ "Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, "
234
+ "please try exporting rank 0's hostname as `MASTER_ADDR`"
235
+ )
236
+ kwargs["rank"] = dist_information.rank
237
+ kwargs["world_size"] = dist_information.world_size
238
+
239
+ if (
240
+ self.distributed_type == DistributedType.MULTI_CPU
241
+ and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
242
+ ):
243
+ import psutil
244
+
245
+ num_cpu_threads_per_process = int(
246
+ psutil.cpu_count(logical=False) / dist_information.local_world_size
247
+ )
248
+ if num_cpu_threads_per_process == 0:
249
+ num_cpu_threads_per_process = 1
250
+ torch.set_num_threads(num_cpu_threads_per_process)
251
+ warnings.warn(
252
+ f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob"
253
+ " performance."
254
+ )
255
+
256
+ if not torch.distributed.is_initialized():
257
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
258
+
259
+ # No backend == no distributed training
260
+ if self.backend is None:
261
+ self.distributed_type = DistributedType.NO
262
+ self.num_processes = 1
263
+ self.process_index = 0
264
+ self.local_process_index = 0
265
+ elif self.backend == "xla":
266
+ # XLA needs device setting first for `set_replication`
267
+ self.set_device()
268
+ xm.set_replication(self.device, xm.get_xla_supported_devices())
269
+ self.num_processes = xm.xrt_world_size()
270
+ self.process_index = xm.get_ordinal()
271
+ if is_torch_xla_available(check_is_tpu=True):
272
+ self.local_process_index = xm.get_local_ordinal()
273
+ else:
274
+ self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
275
+ else:
276
+ self.num_processes = torch.distributed.get_world_size()
277
+ self.process_index = torch.distributed.get_rank()
278
+ self.local_process_index = (
279
+ int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank
280
+ )
281
+ self.set_device()
282
+ # Now we can change to deepspeed
283
+ if use_deepspeed:
284
+ self.distributed_type = DistributedType.DEEPSPEED
285
+
286
+ # Set CPU affinity if enabled
287
+ if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False):
288
+ set_numa_affinity(self.local_process_index)
289
+
290
+ # Check for old RTX 4000's that can't use P2P or IB and are on old drivers
291
+ if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
292
+ if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
293
+ raise NotImplementedError(
294
+ "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. "
295
+ 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1"` or use `accelerate launch` which '
296
+ "will do this automatically."
297
+ )
298
+ # Important: This should be the *only* code outside of the `self.initialized` check!
299
+ self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
300
+
301
+ def __repr__(self) -> str:
302
+ return (
303
+ f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
304
+ f"Num processes: {self.num_processes}\n"
305
+ f"Process index: {self.process_index}\n"
306
+ f"Local process index: {self.local_process_index}\n"
307
+ f"Device: {self.device}\n"
308
+ )
309
+
310
+ @staticmethod
311
+ def _reset_state():
312
+ "Resets `_shared_state`, is used internally and should not be called"
313
+ PartialState._shared_state.clear()
314
+
315
+ @property
316
+ def initialized(self) -> bool:
317
+ "Returns whether the `PartialState` has been initialized"
318
+ return self._shared_state != {}
319
+
320
+ @property
321
+ def use_distributed(self):
322
+ """
323
+ Whether the Accelerator is configured for distributed training
324
+ """
325
+ return self.distributed_type != DistributedType.NO and self.num_processes > 1
326
+
327
+ @property
328
+ def is_last_process(self) -> bool:
329
+ "Returns whether the current process is the last one"
330
+ return self.process_index == self.num_processes - 1
331
+
332
+ @property
333
+ def is_main_process(self) -> bool:
334
+ "Returns whether the current process is the main process"
335
+ return (
336
+ self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
337
+ )
338
+
339
+ @property
340
+ def is_local_main_process(self) -> bool:
341
+ "Returns whether the current process is the main process on the local node"
342
+ return (
343
+ self.local_process_index == 0
344
+ if self.distributed_type != DistributedType.MEGATRON_LM
345
+ else self.is_last_process
346
+ )
347
+
348
+ def wait_for_everyone(self):
349
+ """
350
+ Will stop the execution of the current process until every other process has reached that point (so this does
351
+ nothing when the script is only run in one process). Useful to do before saving a model.
352
+
353
+ Example:
354
+
355
+ ```python
356
+ >>> # Assuming two GPU processes
357
+ >>> import time
358
+ >>> from accelerate.state import PartialState
359
+
360
+ >>> state = PartialState()
361
+ >>> if state.is_main_process:
362
+ ... time.sleep(2)
363
+ >>> else:
364
+ ... print("I'm waiting for the main process to finish its sleep...")
365
+ >>> state.wait_for_everyone()
366
+ >>> # Should print on every process at the same time
367
+ >>> print("Everyone is here")
368
+ ```
369
+ """
370
+ if self.distributed_type in (
371
+ DistributedType.MULTI_GPU,
372
+ DistributedType.MULTI_MLU,
373
+ DistributedType.MULTI_NPU,
374
+ DistributedType.MULTI_XPU,
375
+ DistributedType.MULTI_CPU,
376
+ DistributedType.DEEPSPEED,
377
+ DistributedType.FSDP,
378
+ ):
379
+ torch.distributed.barrier()
380
+ elif self.distributed_type == DistributedType.XLA:
381
+ xm.rendezvous("accelerate.utils.wait_for_everyone")
382
+
383
+ def _goes_first(self, is_main: bool):
384
+ if not is_main:
385
+ self.wait_for_everyone()
386
+
387
+ yield
388
+
389
+ if is_main:
390
+ self.wait_for_everyone()
391
+
392
+ @contextmanager
393
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
394
+ """
395
+ Splits `inputs` between `self.num_processes` quickly, and the result can then be used on that process. Useful when doing
396
+ distributed inference, such as with different prompts.
397
+
398
+ Note that when using a `dict`, all keys need to have the same number of elements.
399
+
400
+ Args:
401
+ inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`):
402
+ The input to split between processes.
403
+ apply_padding (`bool`, `optional`, defaults to `False`):
404
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
405
+ number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
406
+ in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
407
+
408
+
409
+ Example:
410
+
411
+ ```python
412
+ # Assume there are two processes
413
+ from accelerate import PartialState
414
+
415
+ state = PartialState()
416
+ with state.split_between_processes(["A", "B", "C"]) as inputs:
417
+ print(inputs)
418
+ # Process 0
419
+ ["A", "B"]
420
+ # Process 1
421
+ ["C"]
422
+
423
+ with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
424
+ print(inputs)
425
+ # Process 0
426
+ ["A", "B"]
427
+ # Process 1
428
+ ["C", "C"]
429
+ ```
430
+ """
431
+ if self.num_processes == 1:
432
+ yield inputs
433
+ return
434
+ length = len(inputs)
435
+ # Nested dictionary of any types
436
+ if isinstance(inputs, dict):
437
+ length = len(inputs[list(inputs.keys())[0]])
438
+ if not all(len(v) == length for v in inputs.values()):
439
+ raise ValueError("All values in the dictionary must have the same length")
440
+ num_samples_per_process = math.ceil(length / self.num_processes)
441
+ start_index = self.process_index * num_samples_per_process
442
+ end_index = start_index + num_samples_per_process
443
+ if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
444
+ end_index = length
445
+
446
+ def _split_values(inputs, start_index, end_index):
447
+ if isinstance(inputs, (list, tuple, torch.Tensor)):
448
+ if start_index >= len(inputs):
449
+ result = inputs[-1:]
450
+ else:
451
+ result = inputs[start_index:end_index]
452
+ if apply_padding:
453
+ if isinstance(result, torch.Tensor):
454
+ from accelerate.utils import pad_across_processes, send_to_device
455
+
456
+ # The tensor needs to be on the device before we can pad it
457
+ tensorized_result = send_to_device(result, self.device)
458
+ result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
459
+ else:
460
+ result += [result[-1]] * (num_samples_per_process - len(result))
461
+ return result
462
+ elif isinstance(inputs, dict):
463
+ for key in inputs.keys():
464
+ inputs[key] = _split_values(inputs[key], start_index, end_index)
465
+ return inputs
466
+ else:
467
+ if is_datasets_available():
468
+ from datasets import Dataset
469
+
470
+ if isinstance(inputs, Dataset):
471
+ if start_index >= len(inputs):
472
+ start_index = len(inputs) - 1
473
+ if end_index > len(inputs):
474
+ end_index = len(inputs)
475
+ result_idcs = list(range(start_index, end_index))
476
+ if apply_padding:
477
+ result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs))
478
+ return inputs.select(result_idcs)
479
+ return inputs
480
+
481
+ yield _split_values(inputs, start_index, end_index)
482
+
483
+ @contextmanager
484
+ def main_process_first(self):
485
+ """
486
+ Lets the main process go first inside a with block.
487
+
488
+ The other processes will enter the with block after the main process exits.
489
+
490
+ Example:
491
+
492
+ ```python
493
+ >>> from accelerate import Accelerator
494
+
495
+ >>> accelerator = Accelerator()
496
+ >>> with accelerator.main_process_first():
497
+ ... # This will be printed first by process 0 then in a seemingly
498
+ ... # random order by the other processes.
499
+ ... print(f"This will be printed by process {accelerator.process_index}")
500
+ ```
501
+ """
502
+ yield from self._goes_first(self.is_main_process)
503
+
504
+ @contextmanager
505
+ def local_main_process_first(self):
506
+ """
507
+ Lets the local main process go first inside a with block.
508
+
509
+ The other processes will enter the with block after the main process exits.
510
+
511
+ Example:
512
+
513
+ ```python
514
+ >>> from accelerate.state import PartialState
515
+
516
+ >>> state = PartialState()
517
+ >>> with state.local_main_process_first():
518
+ ... # This will be printed first by local process 0 then in a seemingly
519
+ ... # random order by the other processes.
520
+ ... print(f"This will be printed by process {state.local_process_index}")
521
+ ```
522
+ """
523
+ yield from self._goes_first(self.is_local_main_process)
524
+
525
+ def on_main_process(self, function: Callable[..., Any] = None):
526
+ """
527
+ Decorator that only runs the decorated function on the main process.
528
+
529
+ Args:
530
+ function (`Callable`): The function to decorate.
531
+
532
+ Example:
533
+
534
+ ```python
535
+ >>> from accelerate.state import PartialState
536
+
537
+ >>> state = PartialState()
538
+
539
+
540
+ >>> @state.on_main_process
541
+ ... def print_something():
542
+ ... print("This will be printed by process 0 only.")
543
+
544
+
545
+ >>> print_something()
546
+ "This will be printed by process 0 only"
547
+ ```
548
+ """
549
+ if not self.initialized:
550
+ raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.")
551
+ if self.is_main_process or not self.use_distributed:
552
+ return function
553
+ return do_nothing
554
+
555
+ def on_local_main_process(self, function: Callable[..., Any] = None):
556
+ """
557
+ Decorator that only runs the decorated function on the local main process.
558
+
559
+ Args:
560
+ function (`Callable`): The function to decorate.
561
+
562
+ Example:
563
+ ```python
564
+ # Assume we have 2 servers with 4 processes each.
565
+ from accelerate.state import PartialState
566
+
567
+ state = PartialState()
568
+
569
+
570
+ @state.on_local_main_process
571
+ def print_something():
572
+ print("This will be printed by process 0 only on each server.")
573
+
574
+
575
+ print_something()
576
+ # On server 1:
577
+ "This will be printed by process 0 only"
578
+ # On server 2:
579
+ "This will be printed by process 0 only"
580
+ ```
581
+ """
582
+ if self.is_local_main_process or not self.use_distributed:
583
+ return function
584
+ return do_nothing
585
+
586
+ def on_last_process(self, function: Callable[..., Any]):
587
+ """
588
+ Decorator that only runs the decorated function on the last process.
589
+
590
+ Args:
591
+ function (`Callable`): The function to decorate.
592
+
593
+ Example:
594
+ ```python
595
+ # Assume we have 4 processes.
596
+ from accelerate.state import PartialState
597
+
598
+ state = PartialState()
599
+
600
+
601
+ @state.on_last_process
602
+ def print_something():
603
+ print(f"Printed on process {state.process_index}")
604
+
605
+
606
+ print_something()
607
+ "Printed on process 3"
608
+ ```
609
+ """
610
+ if self.is_last_process or not self.use_distributed:
611
+ return function
612
+ return do_nothing
613
+
614
+ def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
615
+ """
616
+ Decorator that only runs the decorated function on the process with the given index.
617
+
618
+ Args:
619
+ function (`Callable`, `optional`):
620
+ The function to decorate.
621
+ process_index (`int`, `optional`):
622
+ The index of the process on which to run the function.
623
+
624
+ Example:
625
+ ```python
626
+ # Assume we have 4 processes.
627
+ from accelerate.state import PartialState
628
+
629
+ state = PartialState()
630
+
631
+
632
+ @state.on_process(process_index=2)
633
+ def print_something():
634
+ print(f"Printed on process {state.process_index}")
635
+
636
+
637
+ print_something()
638
+ "Printed on process 2"
639
+ ```
640
+ """
641
+ if function is None:
642
+ return partial(self.on_process, process_index=process_index)
643
+ if (self.process_index == process_index) or (not self.use_distributed):
644
+ return function
645
+ return do_nothing
646
+
647
+ def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
648
+ """
649
+ Decorator that only runs the decorated function on the process with the given index on the current node.
650
+
651
+ Args:
652
+ function (`Callable`, *optional*):
653
+ The function to decorate.
654
+ local_process_index (`int`, *optional*):
655
+ The index of the local process on which to run the function.
656
+
657
+ Example:
658
+ ```python
659
+ # Assume we have 2 servers with 4 processes each.
660
+ from accelerate import Accelerator
661
+
662
+ accelerator = Accelerator()
663
+
664
+
665
+ @accelerator.on_local_process(local_process_index=2)
666
+ def print_something():
667
+ print(f"Printed on process {accelerator.local_process_index}")
668
+
669
+
670
+ print_something()
671
+ # On server 1:
672
+ "Printed on process 2"
673
+ # On server 2:
674
+ "Printed on process 2"
675
+ ```
676
+ """
677
+ if function is None:
678
+ return partial(self.on_local_process, local_process_index=local_process_index)
679
+ if (self.local_process_index == local_process_index) or (not self.use_distributed):
680
+ return function
681
+ return do_nothing
682
+
683
+ def print(self, *args, **kwargs):
684
+ if self.is_local_main_process:
685
+ print(*args, **kwargs)
686
+
687
+ @property
688
+ def default_device(self) -> torch.device:
689
+ """
690
+ Returns the default device which is:
691
+ - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
692
+ - CUDA if `torch.cuda.is_available()`
693
+ - MLU if `is_mlu_available()`
694
+ - NPU if `is_npu_available()`
695
+ - CPU otherwise
696
+ """
697
+ if is_mps_available():
698
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
699
+ return torch.device("mps")
700
+ elif is_mlu_available():
701
+ return torch.device("mlu")
702
+ elif torch.cuda.is_available():
703
+ return torch.device("cuda")
704
+ elif is_xpu_available():
705
+ return torch.device("xpu:0")
706
+ elif is_npu_available():
707
+ return torch.device("npu")
708
+ else:
709
+ return torch.device("cpu")
710
+
711
+ def _prepare_backend(
712
+ self, cpu: bool = False, sagemaker_dp=False, backend: str = None
713
+ ) -> tuple[str, DistributedType]:
714
+ "Prepares any imports needed before initializing the distributed backend and sets `self.backend` properly"
715
+ distributed_type = None
716
+ if sagemaker_dp:
717
+ import smdistributed.dataparallel.torch.torch_smddp # noqa
718
+
719
+ backend = "smddp"
720
+ distributed_type = DistributedType.MULTI_GPU
721
+ elif is_torch_xla_available():
722
+ backend = "xla"
723
+ distributed_type = DistributedType.XLA
724
+ elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu:
725
+ if is_mlu_available():
726
+ backend = "cncl"
727
+ distributed_type = DistributedType.MULTI_MLU
728
+ elif torch.cuda.is_available():
729
+ if backend is None:
730
+ backend = "nccl"
731
+ distributed_type = DistributedType.MULTI_GPU
732
+ elif is_npu_available():
733
+ backend = "hccl"
734
+ distributed_type = DistributedType.MULTI_NPU
735
+
736
+ if distributed_type is None and (
737
+ int(os.environ.get("LOCAL_RANK", -1)) != -1
738
+ or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1
739
+ ):
740
+ if not cpu and is_xpu_available():
741
+ distributed_type = DistributedType.MULTI_XPU
742
+ else:
743
+ distributed_type = DistributedType.MULTI_CPU
744
+
745
+ if (
746
+ backend in (None, "ccl")
747
+ and is_ccl_available()
748
+ and (get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU)
749
+ ):
750
+ if get_ccl_version() >= "1.12":
751
+ import oneccl_bindings_for_pytorch # noqa: F401
752
+ else:
753
+ import torch_ccl # noqa: F401
754
+
755
+ backend = "ccl"
756
+ elif backend in (None, "mpi") and torch.distributed.is_mpi_available():
757
+ backend = "mpi"
758
+ else:
759
+ backend = "gloo"
760
+ if distributed_type is None:
761
+ distributed_type = DistributedType.NO
762
+
763
+ return backend, distributed_type
764
+
765
+ def set_device(self):
766
+ """
767
+ Sets the device in `self.device` to the current distributed environment.
768
+ """
769
+ if self.device is not None:
770
+ return
771
+ if self.distributed_type == DistributedType.NO:
772
+ self.device = torch.device("cpu") if self._cpu else self.default_device
773
+ return
774
+ device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower()
775
+ if device not in ("cpu", "gpu", "mlu", "npu", "xpu", "xla"):
776
+ raise ValueError(
777
+ f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `_set_device()` for it!"
778
+ )
779
+ if device == "xla":
780
+ self.device = xm.xla_device()
781
+ else:
782
+ if device == "gpu":
783
+ device = "cuda"
784
+ self.device = torch.device(device, self.local_process_index)
785
+ if self.device is not None:
786
+ if device == "xpu":
787
+ torch.xpu.set_device(self.device)
788
+ elif device == "mlu":
789
+ torch.mlu.set_device(self.device)
790
+ elif device == "npu":
791
+ torch.npu.set_device(self.device)
792
+ elif device == "cuda":
793
+ torch.cuda.set_device(self.device)
794
+
795
+ def __getattr__(self, name: str):
796
+ # By this point we know that no attributes of `self` contain `name`,
797
+ # so we just modify the error message
798
+ if name in self._known_attrs:
799
+ raise AttributeError(
800
+ f"`PartialState` object has no attribute `{name}`. "
801
+ "This happens if `PartialState._reset_state()` was called and "
802
+ "an `Accelerator` or `PartialState` was not reinitialized."
803
+ )
804
+ # Raise a typical AttributeError
805
+ raise AttributeError(f"'PartialState' object has no attribute '{name}'")
806
+
807
+
808
+ class AcceleratorState:
809
+ """
810
+ Singleton class that has information about the current training environment.
811
+
812
+ **Available attributes:**
813
+
814
+ - **device** (`torch.device`) -- The device to use.
815
+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
816
+ in use.
817
+ - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.
818
+ - **local_process_index** (`int`) -- The index of the current process on the current server.
819
+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
820
+ of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16' or 'fp8').
821
+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.
822
+ - **process_index** (`int`) -- The index of the current process.
823
+ - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
824
+ - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
825
+ - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
826
+ - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
827
+ """
828
+
829
+ _shared_state = SharedDict()
830
+ _known_attrs = PartialState._known_attrs + [
831
+ "deepspeed_plugin",
832
+ "use_ipex",
833
+ "fsdp_plugin",
834
+ "megatron_lm_plugin",
835
+ "dynamo_plugin",
836
+ ]
837
+
838
+ def __init__(
839
+ self,
840
+ mixed_precision: str = None,
841
+ cpu: bool = False,
842
+ dynamo_plugin=None,
843
+ deepspeed_plugin=None,
844
+ fsdp_plugin=None,
845
+ megatron_lm_plugin=None,
846
+ _from_accelerator: bool = False,
847
+ **kwargs,
848
+ ):
849
+ self.__dict__ = self._shared_state
850
+ if parse_flag_from_env("ACCELERATE_USE_CPU"):
851
+ cpu = True
852
+ if PartialState._shared_state == {}:
853
+ PartialState(cpu, **kwargs)
854
+ self.__dict__.update(PartialState._shared_state)
855
+ self._check_initialized(mixed_precision, cpu)
856
+ if not self.initialized:
857
+ self.deepspeed_plugin = None
858
+ self.use_ipex = None
859
+ mixed_precision = (
860
+ parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no")
861
+ if mixed_precision is None
862
+ else mixed_precision.lower()
863
+ )
864
+ if mixed_precision == "fp8":
865
+ if not is_fp8_available():
866
+ raise ValueError(
867
+ "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed."
868
+ )
869
+ elif not check_fp8_capability():
870
+ logger.warning(
871
+ f"The current device has compute capability of {torch.cuda.get_device_capability()} which is "
872
+ "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace "
873
+ "or higher, compute capability of 8.9 or higher). Will use FP16 instead."
874
+ )
875
+ mixed_precision = "fp16"
876
+
877
+ self.dynamo_plugin = dynamo_plugin
878
+ if not _from_accelerator:
879
+ raise ValueError(
880
+ "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` "
881
+ "before using any functionality from the `accelerate` library."
882
+ )
883
+ # deepspeed handles mixed_precision using deepspeed_config
884
+ self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
885
+ if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True):
886
+ if mixed_precision == "bf16":
887
+ if os.environ.get("ACCELERATE_DOWNCAST_BF16"):
888
+ os.environ["XLA_USE_BF16"] = str(0)
889
+ os.environ["XLA_DOWNCAST_BF16"] = str(1)
890
+ self.downcast_bfloat = True
891
+ else:
892
+ os.environ["XLA_USE_BF16"] = str(1)
893
+ os.environ["XLA_DOWNCAST_BF16"] = str(0)
894
+ self.downcast_bfloat = False
895
+ elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
896
+ self.deepspeed_plugin = deepspeed_plugin
897
+ elif self.distributed_type in [
898
+ DistributedType.MULTI_GPU,
899
+ DistributedType.MULTI_MLU,
900
+ DistributedType.MULTI_NPU,
901
+ DistributedType.MULTI_XPU,
902
+ ]:
903
+ if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
904
+ self.distributed_type = DistributedType.FSDP
905
+ if self._mixed_precision != "no":
906
+ fsdp_plugin.set_mixed_precision(self._mixed_precision)
907
+ self.fsdp_plugin = fsdp_plugin
908
+ if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" and self.distributed_type not in [
909
+ DistributedType.MULTI_NPU,
910
+ DistributedType.MULTI_XPU,
911
+ ]:
912
+ self.distributed_type = DistributedType.MEGATRON_LM
913
+ megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
914
+ self.megatron_lm_plugin = megatron_lm_plugin
915
+ elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
916
+ if is_ipex_available():
917
+ # check if user disables it explicitly
918
+ self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True)
919
+ else:
920
+ self.use_ipex = False
921
+ if (
922
+ self.dynamo_plugin.backend != DynamoBackend.NO
923
+ and self._mixed_precision == "no"
924
+ and self.device.type == "cuda"
925
+ ):
926
+ torch.backends.cuda.matmul.allow_tf32 = True
927
+ PartialState._shared_state["distributed_type"] = self.distributed_type
928
+
929
+ @property
930
+ def initialized(self) -> bool:
931
+ return self._shared_state != PartialState._shared_state
932
+
933
+ def __repr__(self):
934
+ repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
935
+ if self.distributed_type == DistributedType.DEEPSPEED:
936
+ repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
937
+ return repr
938
+
939
+ def _check_initialized(self, mixed_precision=None, cpu=None):
940
+ "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
941
+ if self.initialized:
942
+ err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`."
943
+ if cpu and self.device.type != "cpu":
944
+ raise ValueError(err.format(flag="cpu=True"))
945
+ if (
946
+ mixed_precision is not None
947
+ and mixed_precision != self._mixed_precision
948
+ and self.distributed_type != DistributedType.DEEPSPEED
949
+ ):
950
+ raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
951
+
952
+ # For backward compatibility
953
+ @property
954
+ def use_fp16(self):
955
+ warnings.warn(
956
+ "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
957
+ "`AcceleratorState.mixed_precision == 'fp16'` instead.",
958
+ FutureWarning,
959
+ )
960
+ return self._mixed_precision != "no"
961
+
962
+ @property
963
+ def mixed_precision(self):
964
+ if self.distributed_type == DistributedType.DEEPSPEED:
965
+ config = self.deepspeed_plugin.deepspeed_config
966
+ if config.get("fp16", {}).get("enabled", False):
967
+ mixed_precision = "fp16"
968
+ elif config.get("bf16", {}).get("enabled", False):
969
+ mixed_precision = "bf16"
970
+ else:
971
+ mixed_precision = "no"
972
+ else:
973
+ mixed_precision = self._mixed_precision
974
+ return mixed_precision
975
+
976
+ @staticmethod
977
+ def _reset_state(reset_partial_state: bool = False):
978
+ "Resets `_shared_state`, is used internally and should not be called"
979
+ AcceleratorState._shared_state.clear()
980
+ if reset_partial_state:
981
+ PartialState._reset_state()
982
+
983
+ @property
984
+ def use_distributed(self):
985
+ """
986
+ Whether the Accelerator is configured for distributed training
987
+ """
988
+ return PartialState().use_distributed
989
+
990
+ @property
991
+ def is_last_process(self) -> bool:
992
+ "Returns whether the current process is the last one"
993
+ return PartialState().is_last_process
994
+
995
+ @property
996
+ def is_main_process(self) -> bool:
997
+ "Returns whether the current process is the main process"
998
+ return PartialState().is_main_process
999
+
1000
+ @property
1001
+ def is_local_main_process(self) -> bool:
1002
+ "Returns whether the current process is the main process on the local node"
1003
+ return PartialState().is_local_main_process
1004
+
1005
+ def wait_for_everyone(self):
1006
+ PartialState().wait_for_everyone()
1007
+
1008
+ @contextmanager
1009
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
1010
+ """
1011
+ Splits `inputs` between `self.num_processes` quickly, and the result can then be used on that process. Useful when doing
1012
+ distributed inference, such as with different prompts.
1013
+
1014
+ Note that when using a `dict`, all keys need to have the same number of elements.
1015
+
1016
+ Args:
1017
+ inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
1018
+ The input to split between processes.
1019
+ apply_padding (`bool`, `optional`, defaults to `False`):
1020
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
1021
+ number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
1022
+ in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
1023
+
1024
+
1025
+ Example:
1026
+
1027
+ ```python
1028
+ # Assume there are two processes
1029
+ from accelerate.state import AcceleratorState
1030
+
1031
+ state = AcceleratorState()
1032
+ with state.split_between_processes(["A", "B", "C"]) as inputs:
1033
+ print(inputs)
1034
+ # Process 0
1035
+ ["A", "B"]
1036
+ # Process 1
1037
+ ["C"]
1038
+
1039
+ with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
1040
+ print(inputs)
1041
+ # Process 0
1042
+ ["A", "B"]
1043
+ # Process 1
1044
+ ["C", "C"]
1045
+ ```
1046
+ """
1047
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
1048
+ yield inputs
1049
+
1050
+ @contextmanager
1051
+ def main_process_first(self):
1052
+ """
1053
+ Lets the main process go first inside a with block.
1054
+
1055
+ The other processes will enter the with block after the main process exits.
1056
+ """
1057
+ with PartialState().main_process_first():
1058
+ yield
1059
+
1060
+ @contextmanager
1061
+ def local_main_process_first(self):
1062
+ """
1063
+ Lets the local main process go first inside a with block.
1064
+
1065
+ The other processes will enter the with block after the main process exits.
1066
+ """
1067
+ with PartialState().local_main_process_first():
1068
+ yield
1069
+
1070
+ def print(self, *args, **kwargs):
1071
+ PartialState().print(*args, **kwargs)
1072
+
1073
+ def __getattr__(self, name: str):
1074
+ # By this point we know that no attributes of `self` contain `name`,
1075
+ # so we just modify the error message
1076
+ if name in self._known_attrs:
1077
+ raise AttributeError(
1078
+ f"`AcceleratorState` object has no attribute `{name}`. "
1079
+ "This happens if `AcceleratorState._reset_state()` was called and "
1080
+ "an `Accelerator` or `PartialState` was not reinitialized."
1081
+ )
1082
+ # Raise a typical AttributeError
1083
+ raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'")
1084
+
1085
+
1086
+ class GradientState:
1087
+ """
1088
+ Singleton class that has information related to gradient synchronization for gradient accumulation
1089
+
1090
+ **Available attributes:**
1091
+
1092
+ - **end_of_dataloader** (`bool`) -- Whether we have reached the end of the current dataloader
1093
+ - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
1094
+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
1095
+ - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over
1096
+ - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are
1097
+ being iterated over
1098
+ - **num_steps** (`int`) -- The number of steps to accumulate over
1099
+ - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient
1100
+ accumulation
1101
+ - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader
1102
+ iteration and the number of total steps reset
1103
+ - **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized
1104
+ as false. Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently,
1105
+ after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence
1106
+ is_xla_gradients_synced is always true.
1107
+ """
1108
+
1109
+ _shared_state = SharedDict()
1110
+
1111
+ def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
1112
+ self.__dict__ = self._shared_state
1113
+ if not self.initialized:
1114
+ self.sync_gradients = True
1115
+ self.active_dataloader = None
1116
+ self.dataloader_references = [None]
1117
+ self.plugin_kwargs = (
1118
+ gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
1119
+ )
1120
+ self._is_xla_gradients_synced = False
1121
+
1122
+ # Plugin args are different and can be updated
1123
+ if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
1124
+ self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
1125
+
1126
+ @property
1127
+ def num_steps(self) -> int:
1128
+ "Returns the number of steps to accumulate over"
1129
+ return self.plugin_kwargs.get("num_steps", 1)
1130
+
1131
+ @property
1132
+ def adjust_scheduler(self) -> bool:
1133
+ "Returns whether the scheduler should be adjusted"
1134
+ return self.plugin_kwargs.get("adjust_scheduler", False)
1135
+
1136
+ @property
1137
+ def sync_with_dataloader(self) -> bool:
1138
+ "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
1139
+ return self.plugin_kwargs.get("sync_with_dataloader", True)
1140
+
1141
+ @property
1142
+ def initialized(self) -> bool:
1143
+ "Returns whether the `GradientState` has been initialized"
1144
+ return GradientState._shared_state != {}
1145
+
1146
+ @property
1147
+ def end_of_dataloader(self) -> bool:
1148
+ "Returns whether we have reached the end of the current dataloader"
1149
+ if not self.in_dataloader:
1150
+ return False
1151
+ return self.active_dataloader.end_of_dataloader
1152
+
1153
+ @property
1154
+ def remainder(self) -> int:
1155
+ "Returns the number of extra samples that were added from padding the dataloader"
1156
+ if not self.in_dataloader:
1157
+ return -1
1158
+ return self.active_dataloader.remainder
1159
+
1160
+ def __repr__(self):
1161
+ return (
1162
+ f"Sync Gradients: {self.sync_gradients}\n"
1163
+ f"At end of current dataloader: {self.end_of_dataloader}\n"
1164
+ f"Extra samples added: {self.remainder}\n"
1165
+ f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
1166
+ )
1167
+
1168
+ @property
1169
+ def is_xla_gradients_synced(self):
1170
+ "Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true."
1171
+ if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False):
1172
+ return True
1173
+ return self._is_xla_gradients_synced
1174
+
1175
+ @is_xla_gradients_synced.setter
1176
+ def is_xla_gradients_synced(self, is_synced):
1177
+ "Set the _is_xla_gradients_synced attribute."
1178
+ self._is_xla_gradients_synced = is_synced
1179
+
1180
+ def _set_sync_gradients(self, sync_gradients):
1181
+ "Private function that sets whether gradients should be synchronized. Users should not have to call this."
1182
+ self.sync_gradients = sync_gradients
1183
+ # Allow grad-sync to automatically work on TPUs
1184
+ if (
1185
+ self.sync_gradients
1186
+ and is_torch_xla_available(check_is_tpu=True)
1187
+ and PartialState().distributed_type == DistributedType.XLA
1188
+ ):
1189
+ xm.mark_step()
1190
+
1191
+ def _add_dataloader(self, dataloader):
1192
+ "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
1193
+ self.active_dataloader = dataloader
1194
+ self.dataloader_references.append(self.active_dataloader)
1195
+
1196
+ def _remove_dataloader(self, dataloader):
1197
+ "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
1198
+ self.dataloader_references.remove(dataloader)
1199
+ self.active_dataloader = self.dataloader_references[-1]
1200
+
1201
+ @property
1202
+ def in_dataloader(self) -> bool:
1203
+ "Returns whether the current process is in a dataloader"
1204
+ return self.active_dataloader is not None
1205
+
1206
+ @staticmethod
1207
+ def _reset_state():
1208
+ "Resets `_shared_state`, is used internally and should not be called"
1209
+ GradientState._shared_state.clear()
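`GradientState` is normally driven by the `Accelerator` rather than used directly; the usual entry point is `Accelerator.accumulate`, which toggles `sync_gradients` for you. A minimal sketch of that workflow (the model, data, and hyperparameters below are placeholders):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = torch.utils.data.DataLoader([(torch.randn(8), torch.randn(1)) for _ in range(16)])

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for x, y in dataloader:
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        # accelerator.sync_gradients (backed by GradientState) is only True on
        # every 4th step; the wrapped optimizer skips stepping otherwise.
        optimizer.step()
        optimizer.zero_grad()
```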
venv/lib/python3.10/site-packages/accelerate/tracking.py ADDED
@@ -0,0 +1,1023 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Expectation:
16
+ # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}
17
+
18
+ import json
19
+ import os
20
+ import time
21
+ from functools import wraps
22
+ from typing import Any, Dict, List, Optional, Union
23
+
24
+ import yaml
25
+
26
+ from .logging import get_logger
27
+ from .state import PartialState
28
+ from .utils import (
29
+ LoggerType,
30
+ is_aim_available,
31
+ is_clearml_available,
32
+ is_comet_ml_available,
33
+ is_dvclive_available,
34
+ is_mlflow_available,
35
+ is_tensorboard_available,
36
+ is_wandb_available,
37
+ listify,
38
+ )
39
+
40
+
41
+ _available_trackers = []
42
+
43
+ if is_tensorboard_available():
44
+ _available_trackers.append(LoggerType.TENSORBOARD)
45
+
46
+ if is_wandb_available():
47
+ _available_trackers.append(LoggerType.WANDB)
48
+
49
+ if is_comet_ml_available():
50
+ _available_trackers.append(LoggerType.COMETML)
51
+
52
+ if is_aim_available():
53
+ _available_trackers.append(LoggerType.AIM)
54
+
55
+ if is_mlflow_available():
56
+ _available_trackers.append(LoggerType.MLFLOW)
57
+
58
+ if is_clearml_available():
59
+ _available_trackers.append(LoggerType.CLEARML)
60
+
61
+ if is_dvclive_available():
62
+ _available_trackers.append(LoggerType.DVCLIVE)
63
+
64
+ logger = get_logger(__name__)
65
+
66
+
67
+ def on_main_process(function):
68
+ """
69
+ Decorator to selectively run the decorated function on the main process only based on the `main_process_only`
70
+ attribute in a class.
71
+
72
+ Checks at function execution rather than initialization time, not triggering the initialization of the
73
+ `PartialState`.
74
+ """
75
+
76
+ @wraps(function)
77
+ def execute_on_main_process(self, *args, **kwargs):
78
+ if getattr(self, "main_process_only", False):
79
+ return PartialState().on_main_process(function)(self, *args, **kwargs)
80
+ else:
81
+ return function(self, *args, **kwargs)
82
+
83
+ return execute_on_main_process
84
+
85
+
86
+ def get_available_trackers():
87
+ "Returns a list of all supported available trackers in the system"
88
+ return _available_trackers
89
+
90
+
91
+ class GeneralTracker:
92
+ """
93
+ A base Tracker class to be used for all logging integration implementations.
94
+
95
+ Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to
96
+ [`Accelerator`].
97
+
98
+ Should implement `name`, `requires_logging_directory`, and `tracker` properties such that:
99
+
100
+ `name` (`str`): String representation of the tracker class name, such as "TensorBoard". `requires_logging_directory`
101
+ (`bool`): Whether the logger requires a directory to store its logs. `tracker` (`object`): Should return the internal
102
+ tracking mechanism used by a tracker class (such as the `run` for wandb)
103
+
104
+ Implementations can also include a `main_process_only` (`bool`) attribute to toggle whether relevant logging, init, and
105
+ other functions should occur on the main process or across all processes (by default will use `True`)
106
+ """
107
+
108
+ main_process_only = True
109
+
110
+ def __init__(self, _blank=False):
111
+ if not _blank:
112
+ err = ""
113
+ if not hasattr(self, "name"):
114
+ err += "`name`"
115
+ if not hasattr(self, "requires_logging_directory"):
116
+ if len(err) > 0:
117
+ err += ", "
118
+ err += "`requires_logging_directory`"
119
+
120
+ # as tracker is a @property that relies on post-init
121
+ if "tracker" not in dir(self):
122
+ if len(err) > 0:
123
+ err += ", "
124
+ err += "`tracker`"
125
+ if len(err) > 0:
126
+ raise NotImplementedError(
127
+ f"The implementation for this tracker class is missing the following "
128
+ f"required attributes. Please define them in the class definition: "
129
+ f"{err}"
130
+ )
131
+
132
+ def store_init_configuration(self, values: dict):
133
+ """
134
+ Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration
135
+ functionality of a tracking API.
136
+
137
+ Args:
138
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
139
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
140
+ `str`, `float`, `int`, or `None`.
141
+ """
142
+ pass
143
+
144
+ def log(self, values: dict, step: Optional[int], **kwargs):
145
+ """
146
+ Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with
147
+ special behavior for the `step` parameter.
148
+
149
+ Args:
150
+ values (Dictionary `str` to `str`, `float`, or `int`):
151
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
152
+ step (`int`, *optional*):
153
+ The run step. If included, the log will be affiliated with this step.
154
+ """
155
+ pass
156
+
157
+ def finish(self):
158
+ """
159
+ Should run any finalizing functions within the tracking API. If the API has no such function, just don't
160
+ overwrite that method.
161
+ """
162
+ pass
163
+
164
+
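To make the contract above concrete, here is a minimal sketch of a custom tracker (the `JSONLTracker` name and its file format are invented for illustration and are not part of this module):

```py
import json
import os
from typing import Optional, Union

from accelerate.tracking import GeneralTracker


class JSONLTracker(GeneralTracker):
    # The three attributes `GeneralTracker.__init__` checks for.
    name = "jsonl"
    requires_logging_directory = True

    def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike]):
        super().__init__()
        self.run_name = run_name
        os.makedirs(logging_dir, exist_ok=True)
        self._file = open(os.path.join(logging_dir, f"{run_name}.jsonl"), "a")

    @property
    def tracker(self):
        # The internal tracking object, here simply the file handle.
        return self._file

    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        # One JSON object per call; `step` is stored alongside the values.
        self._file.write(json.dumps({"step": step, **values}) + "\n")

    def finish(self):
        self._file.close()
```

An instance of such a class can be passed directly in `log_with` when creating an `Accelerator`, alongside the built-in tracker names.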
165
+ class TensorBoardTracker(GeneralTracker):
166
+ """
167
+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.
168
+
169
+ Args:
170
+ run_name (`str`):
171
+ The name of the experiment run
172
+ logging_dir (`str`, `os.PathLike`):
173
+ Location for TensorBoard logs to be stored.
174
+ **kwargs (additional keyword arguments, *optional*):
175
+ Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method.
176
+ """
177
+
178
+ name = "tensorboard"
179
+ requires_logging_directory = True
180
+
181
+ @on_main_process
182
+ def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
183
+ try:
184
+ from torch.utils import tensorboard
185
+ except ModuleNotFoundError:
186
+ import tensorboardX as tensorboard
187
+ super().__init__()
188
+ self.run_name = run_name
189
+ self.logging_dir = os.path.join(logging_dir, run_name)
190
+ self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)
191
+ logger.debug(f"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}")
192
+ logger.debug(
193
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
194
+ )
195
+
196
+ @property
197
+ def tracker(self):
198
+ return self.writer
199
+
200
+ @on_main_process
201
+ def store_init_configuration(self, values: dict):
202
+ """
203
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
204
+ hyperparameters in a yaml file for future use.
205
+
206
+ Args:
207
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
208
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
209
+ `str`, `float`, `int`, or `None`.
210
+ """
211
+ self.writer.add_hparams(values, metric_dict={})
212
+ self.writer.flush()
213
+ project_run_name = time.time()
214
+ dir_name = os.path.join(self.logging_dir, str(project_run_name))
215
+ os.makedirs(dir_name, exist_ok=True)
216
+ with open(os.path.join(dir_name, "hparams.yml"), "w") as outfile:
217
+ try:
218
+ yaml.dump(values, outfile)
219
+ except yaml.representer.RepresenterError:
220
+ logger.error("Serialization to store hyperparameters failed")
221
+ raise
222
+ logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file")
223
+
224
+ @on_main_process
225
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
226
+ """
227
+ Logs `values` to the current run.
228
+
229
+ Args:
230
+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
231
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
232
+ `str` to `float`/`int`.
233
+ step (`int`, *optional*):
234
+ The run step. If included, the log will be affiliated with this step.
235
+ kwargs:
236
+ Additional keyword arguments passed along to either the `SummaryWriter.add_scalar`,
237
+ `SummaryWriter.add_text`, or `SummaryWriter.add_scalars` method based on the contents of `values`.
238
+ """
239
+ values = listify(values)
240
+ for k, v in values.items():
241
+ if isinstance(v, (int, float)):
242
+ self.writer.add_scalar(k, v, global_step=step, **kwargs)
243
+ elif isinstance(v, str):
244
+ self.writer.add_text(k, v, global_step=step, **kwargs)
245
+ elif isinstance(v, dict):
246
+ self.writer.add_scalars(k, v, global_step=step, **kwargs)
247
+ self.writer.flush()
248
+ logger.debug("Successfully logged to TensorBoard")
249
+
250
+ @on_main_process
251
+ def log_images(self, values: dict, step: Optional[int], **kwargs):
252
+ """
253
+ Logs `images` to the current run.
254
+
255
+ Args:
256
+ values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
257
+ Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
258
+ step (`int`, *optional*):
259
+ The run step. If included, the log will be affiliated with this step.
260
+ kwargs:
261
+ Additional key word arguments passed along to the `SummaryWriter.add_image` method.
262
+ """
263
+ for k, v in values.items():
264
+ self.writer.add_images(k, v, global_step=step, **kwargs)
265
+ logger.debug("Successfully logged images to TensorBoard")
266
+
267
+ @on_main_process
268
+ def finish(self):
269
+ """
270
+ Closes `TensorBoard` writer
271
+ """
272
+ self.writer.close()
273
+ logger.debug("TensorBoard writer closed")
274
+
275
+
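A short usage sketch for driving this tracker directly (the run name and directory below are placeholders, and `tensorboard` is assumed to be installed); in a training script the same calls are normally issued for you by `Accelerator.init_trackers` and `Accelerator.log`:

```py
from accelerate.tracking import TensorBoardTracker

tracker = TensorBoardTracker(run_name="baseline", logging_dir="tb_logs")
tracker.store_init_configuration({"lr": 1e-4, "batch_size": 32})
for step in range(3):
    tracker.log({"loss": 1.0 / (step + 1)}, step=step)
tracker.finish()
```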
276
+ class WandBTracker(GeneralTracker):
277
+ """
278
+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.
279
+
280
+ Args:
281
+ run_name (`str`):
282
+ The name of the experiment run.
283
+ **kwargs (additional keyword arguments, *optional*):
284
+ Additional key word arguments passed along to the `wandb.init` method.
285
+ """
286
+
287
+ name = "wandb"
288
+ requires_logging_directory = False
289
+ main_process_only = False
290
+
291
+ @on_main_process
292
+ def __init__(self, run_name: str, **kwargs):
293
+ super().__init__()
294
+ self.run_name = run_name
295
+
296
+ import wandb
297
+
298
+ self.run = wandb.init(project=self.run_name, **kwargs)
299
+ logger.debug(f"Initialized WandB project {self.run_name}")
300
+ logger.debug(
301
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
302
+ )
303
+
304
+ @property
305
+ def tracker(self):
306
+ return self.run
307
+
308
+ @on_main_process
309
+ def store_init_configuration(self, values: dict):
310
+ """
311
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
312
+
313
+ Args:
314
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
315
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
316
+ `str`, `float`, `int`, or `None`.
317
+ """
318
+ import wandb
319
+
320
+ wandb.config.update(values, allow_val_change=True)
321
+ logger.debug("Stored initial configuration hyperparameters to WandB")
322
+
323
+ @on_main_process
324
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
325
+ """
326
+ Logs `values` to the current run.
327
+
328
+ Args:
329
+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
330
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
331
+ `str` to `float`/`int`.
332
+ step (`int`, *optional*):
333
+ The run step. If included, the log will be affiliated with this step.
334
+ kwargs:
335
+ Additional key word arguments passed along to the `wandb.log` method.
336
+ """
337
+ self.run.log(values, step=step, **kwargs)
338
+ logger.debug("Successfully logged to WandB")
339
+
340
+ @on_main_process
341
+ def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
342
+ """
343
+ Logs `images` to the current run.
344
+
345
+ Args:
346
+ values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
347
+ Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
348
+ step (`int`, *optional*):
349
+ The run step. If included, the log will be affiliated with this step.
350
+ kwargs:
351
+ Additional key word arguments passed along to the `wandb.log` method.
352
+ """
353
+ import wandb
354
+
355
+ for k, v in values.items():
356
+ self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
357
+ logger.debug("Successfully logged images to WandB")
358
+
359
+ @on_main_process
360
+ def log_table(
361
+ self,
362
+ table_name: str,
363
+ columns: List[str] = None,
364
+ data: List[List[Any]] = None,
365
+ dataframe: Any = None,
366
+ step: Optional[int] = None,
367
+ **kwargs,
368
+ ):
369
+ """
370
+ Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either
371
+ with `columns` and `data` or with `dataframe`.
372
+
373
+ Args:
374
+ table_name (`str`):
375
+ The name to give to the logged table on the wandb workspace
376
+ columns (list of `str`, *optional*):
377
+ The name of the columns on the table
378
+ data (List of List of Any data type, *optional*):
379
+ The data to be logged in the table
380
+ dataframe (Any data type, *optional*):
381
+ The data to be logged in the table
382
+ step (`int`, *optional*):
383
+ The run step. If included, the log will be affiliated with this step.
384
+ """
385
+ import wandb
386
+
387
+ values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
388
+ self.log(values, step=step, **kwargs)
389
+
390
+ @on_main_process
391
+ def finish(self):
392
+ """
393
+ Closes `wandb` writer
394
+ """
395
+ self.run.finish()
396
+ logger.debug("WandB run closed")
397
+
398
+
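As a quick sketch of `log_table` (assuming `wandb` is installed and the user is logged in; the run name and table contents are placeholders):

```py
from accelerate.tracking import WandBTracker

tracker = WandBTracker(run_name="table-demo")
tracker.log_table(
    "predictions",
    columns=["input", "prediction"],
    data=[["hello", "bonjour"], ["cat", "chat"]],
    step=0,
)
tracker.finish()
```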
399
+ class CometMLTracker(GeneralTracker):
400
+ """
401
+ A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script.
402
+
403
+ API keys must be stored in a Comet config file.
404
+
405
+ Args:
406
+ run_name (`str`):
407
+ The name of the experiment run.
408
+ **kwargs (additional keyword arguments, *optional*):
409
+ Additional key word arguments passed along to the `Experiment.__init__` method.
410
+ """
411
+
412
+ name = "comet_ml"
413
+ requires_logging_directory = False
414
+
415
+ @on_main_process
416
+ def __init__(self, run_name: str, **kwargs):
417
+ super().__init__()
418
+ self.run_name = run_name
419
+
420
+ from comet_ml import Experiment
421
+
422
+ self.writer = Experiment(project_name=run_name, **kwargs)
423
+ logger.debug(f"Initialized CometML project {self.run_name}")
424
+ logger.debug(
425
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
426
+ )
427
+
428
+ @property
429
+ def tracker(self):
430
+ return self.writer
431
+
432
+ @on_main_process
433
+ def store_init_configuration(self, values: dict):
434
+ """
435
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
436
+
437
+ Args:
438
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
439
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
440
+ `str`, `float`, `int`, or `None`.
441
+ """
442
+ self.writer.log_parameters(values)
443
+ logger.debug("Stored initial configuration hyperparameters to CometML")
444
+
445
+ @on_main_process
446
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
447
+ """
448
+ Logs `values` to the current run.
449
+
450
+ Args:
451
+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
452
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
453
+ `str` to `float`/`int`.
454
+ step (`int`, *optional*):
455
+ The run step. If included, the log will be affiliated with this step.
456
+ kwargs:
457
+ Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`,
458
+ or `Experiment.log_metrics` method based on the contents of `values`.
459
+ """
460
+ if step is not None:
461
+ self.writer.set_step(step)
462
+ for k, v in values.items():
463
+ if isinstance(v, (int, float)):
464
+ self.writer.log_metric(k, v, step=step, **kwargs)
465
+ elif isinstance(v, str):
466
+ self.writer.log_other(k, v, **kwargs)
467
+ elif isinstance(v, dict):
468
+ self.writer.log_metrics(v, step=step, **kwargs)
469
+ logger.debug("Successfully logged to CometML")
470
+
471
+ @on_main_process
472
+ def finish(self):
473
+ """
474
+ Closes `comet-ml` writer
475
+ """
476
+ self.writer.end()
477
+ logger.debug("CometML run closed")
478
+
479
+
480
+ class AimTracker(GeneralTracker):
481
+ """
482
+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.
483
+
484
+ Args:
485
+ run_name (`str`):
486
+ The name of the experiment run.
487
+ **kwargs (additional keyword arguments, *optional*):
488
+ Additional key word arguments passed along to the `Run.__init__` method.
489
+ """
490
+
491
+ name = "aim"
492
+ requires_logging_directory = True
493
+
494
+ @on_main_process
495
+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
496
+ self.run_name = run_name
497
+
498
+ from aim import Run
499
+
500
+ self.writer = Run(repo=logging_dir, **kwargs)
501
+ self.writer.name = self.run_name
502
+ logger.debug(f"Initialized Aim project {self.run_name}")
503
+ logger.debug(
504
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
505
+ )
506
+
507
+ @property
508
+ def tracker(self):
509
+ return self.writer
510
+
511
+ @on_main_process
512
+ def store_init_configuration(self, values: dict):
513
+ """
514
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
515
+
516
+ Args:
517
+ values (`dict`):
518
+ Values to be stored as initial hyperparameters as key-value pairs.
519
+ """
520
+ self.writer["hparams"] = values
521
+
522
+ @on_main_process
523
+ def log(self, values: dict, step: Optional[int], **kwargs):
524
+ """
525
+ Logs `values` to the current run.
526
+
527
+ Args:
528
+ values (`dict`):
529
+ Values to be logged as key-value pairs.
530
+ step (`int`, *optional*):
531
+ The run step. If included, the log will be affiliated with this step.
532
+ kwargs:
533
+ Additional key word arguments passed along to the `Run.track` method.
534
+ """
535
+ # Note: replace this with the dictionary support when merged
536
+ for key, value in values.items():
537
+ self.writer.track(value, name=key, step=step, **kwargs)
538
+
539
+ @on_main_process
540
+ def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None):
541
+ """
542
+ Logs `images` to the current run.
543
+
544
+ Args:
545
+ values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`):
546
+ Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a
547
+ tuple is provided, the first element should be the image and the second element should be the caption.
548
+ step (`int`, *optional*):
549
+ The run step. If included, the log will be affiliated with this step.
550
+ kwargs (`Dict[str, dict]`):
551
+ Additional key word arguments passed along to the `Run.Image` and `Run.track` method specified by the
552
+ keys `aim_image` and `track`, respectively.
553
+ """
554
+ import aim
555
+
556
+ aim_image_kw = {}
557
+ track_kw = {}
558
+
559
+ if kwargs is not None:
560
+ aim_image_kw = kwargs.get("aim_image", {})
561
+ track_kw = kwargs.get("track", {})
562
+
563
+ for key, value in values.items():
564
+ if isinstance(value, tuple):
565
+ img, caption = value
566
+ else:
567
+ img, caption = value, ""
568
+ aim_image = aim.Image(img, caption=caption, **aim_image_kw)
569
+ self.writer.track(aim_image, name=key, step=step, **track_kw)
570
+
571
+ @on_main_process
572
+ def finish(self):
573
+ """
574
+ Closes `aim` writer
575
+ """
576
+ self.writer.close()
577
+
578
+
579
+ class MLflowTracker(GeneralTracker):
580
+ """
581
+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.
582
+
583
+ Args:
584
+ experiment_name (`str`, *optional*):
585
+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.
586
+ logging_dir (`str` or `os.PathLike`, defaults to `"."`):
587
+ Location for mlflow logs to be stored.
588
+ run_id (`str`, *optional*):
589
+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s
590
+ end time is unset and its status is set to running, but the run’s other attributes (source_version,
591
+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.
592
+ tags (`Dict[str, str]`, *optional*):
593
+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a
594
+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are
595
+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.
596
+ nested_run (`bool`, *optional*, defaults to `False`):
597
+ Controls whether run is nested in parent run. True creates a nested run. Environment variable
598
+ MLFLOW_NESTED_RUN has priority over this argument.
599
+ run_name (`str`, *optional*):
600
+ Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.
601
+ description (`str`, *optional*):
602
+ An optional string that populates the description box of the run. If a run is being resumed, the
603
+ description is set on the resumed run. If a new run is being created, the description is set on the new
604
+ run.
605
+ """
606
+
607
+ name = "mlflow"
608
+ requires_logging_directory = False
609
+
610
+ @on_main_process
611
+ def __init__(
612
+ self,
613
+ experiment_name: str = None,
614
+ logging_dir: Optional[Union[str, os.PathLike]] = None,
615
+ run_id: Optional[str] = None,
616
+ tags: Optional[Union[Dict[str, Any], str]] = None,
617
+ nested_run: Optional[bool] = False,
618
+ run_name: Optional[str] = None,
619
+ description: Optional[str] = None,
620
+ ):
621
+ experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME", experiment_name)
622
+ run_id = os.environ.get("MLFLOW_RUN_ID", run_id)
623
+ tags = os.environ.get("MLFLOW_TAGS", tags)
624
+ if isinstance(tags, str):
625
+ tags = json.loads(tags)
626
+
627
+ nested_run = os.environ.get("MLFLOW_NESTED_RUN", nested_run)
628
+
629
+ import mlflow
630
+
631
+ exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'")
632
+ if len(exps) > 0:
633
+ if len(exps) > 1:
634
+ logger.warning("Multiple experiments with the same name found. Using first one.")
635
+ experiment_id = exps[0].experiment_id
636
+ else:
637
+ experiment_id = mlflow.create_experiment(
638
+ name=experiment_name,
639
+ artifact_location=logging_dir,
640
+ tags=tags,
641
+ )
642
+
643
+ self.active_run = mlflow.start_run(
644
+ run_id=run_id,
645
+ experiment_id=experiment_id,
646
+ run_name=run_name,
647
+ nested=nested_run,
648
+ tags=tags,
649
+ description=description,
650
+ )
651
+
652
+ logger.debug(f"Initialized mlflow experiment {experiment_name}")
653
+ logger.debug(
654
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
655
+ )
656
+
657
+ @property
658
+ def tracker(self):
659
+ return self.active_run
660
+
661
+ @on_main_process
662
+ def store_init_configuration(self, values: dict):
663
+ """
664
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
665
+
666
+ Args:
667
+ values (`dict`):
668
+ Values to be stored as initial hyperparameters as key-value pairs.
669
+ """
670
+ import mlflow
671
+
672
+ for name, value in list(values.items()):
673
+ # internally, all values are converted to str in MLflow
674
+ if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:
675
+ logger.warning_once(
676
+ f'Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s'
677
+ f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute."
678
+ )
679
+ del values[name]
680
+
681
+ values_list = list(values.items())
682
+
683
+ # MLflow cannot log more than 100 values in one go, so we have to split it
684
+ for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):
685
+ mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))
686
+
687
+ logger.debug("Stored initial configuration hyperparameters to MLflow")
688
+
689
+ @on_main_process
690
+ def log(self, values: dict, step: Optional[int]):
691
+ """
692
+ Logs `values` to the current run.
693
+
694
+ Args:
695
+ values (`dict`):
696
+ Values to be logged as key-value pairs.
697
+ step (`int`, *optional*):
698
+ The run step. If included, the log will be affiliated with this step.
699
+ """
700
+ metrics = {}
701
+ for k, v in values.items():
702
+ if isinstance(v, (int, float)):
703
+ metrics[k] = v
704
+ else:
705
+ logger.warning_once(
706
+ f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. '
707
+ "MLflow's log_metric() only accepts float and int types so we dropped this attribute."
708
+ )
709
+ import mlflow
710
+
711
+ mlflow.log_metrics(metrics, step=step)
712
+ logger.debug("Successfully logged to mlflow")
713
+
714
+ @on_main_process
715
+ def finish(self):
716
+ """
717
+ End the active MLflow run.
718
+ """
719
+ import mlflow
720
+
721
+ mlflow.end_run()
722
+
723
+
724
+ class ClearMLTracker(GeneralTracker):
725
+ """
726
+ A `Tracker` class that supports `clearml`. Should be initialized at the start of your script.
727
+
728
+ Args:
729
+ run_name (`str`, *optional*):
730
+ Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this
731
+ argument.
732
+ **kwargs (additional keyword arguments, *optional*):
733
+ Kwargs passed along to the `Task.__init__` method.
734
+ """
735
+
736
+ name = "clearml"
737
+ requires_logging_directory = False
738
+
739
+ @on_main_process
740
+ def __init__(self, run_name: str = None, **kwargs):
741
+ from clearml import Task
742
+
743
+ current_task = Task.current_task()
744
+ self._initialized_externally = False
745
+ if current_task:
746
+ self._initialized_externally = True
747
+ self.task = current_task
748
+ return
749
+
750
+ kwargs.setdefault("project_name", os.environ.get("CLEARML_PROJECT", run_name))
751
+ kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name))
752
+ self.task = Task.init(**kwargs)
753
+
754
+ @property
755
+ def tracker(self):
756
+ return self.task
757
+
758
+ @on_main_process
759
+ def store_init_configuration(self, values: dict):
760
+ """
761
+ Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment.
762
+
763
+ Args:
764
+ values (`dict`):
765
+ Values to be stored as initial hyperparameters as key-value pairs.
766
+ """
767
+ return self.task.connect_configuration(values)
768
+
769
+ @on_main_process
770
+ def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs):
771
+ """
772
+ Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be
773
+ ints or floats.
774
+
775
+ Args:
776
+ values (`Dict[str, Union[int, float]]`):
777
+ Values to be logged as key-value pairs. If the key starts with 'eval_'/'test_'/'train_', the value will
778
+ be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed.
779
+ Otherwise, the value will be reported under the 'train' series, and no prefix will be removed.
780
+ step (`int`, *optional*):
781
+ If specified, the values will be reported as scalars, with the iteration number equal to `step`.
782
+ Otherwise they will be reported as single values.
783
+ kwargs:
784
+ Additional key word arguments passed along to the `clearml.Logger.report_single_value` or
785
+ `clearml.Logger.report_scalar` methods.
786
+ """
787
+ clearml_logger = self.task.get_logger()
788
+ for k, v in values.items():
789
+ if not isinstance(v, (int, float)):
790
+ logger.warning_once(
791
+ "Accelerator is attempting to log a value of "
792
+ f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
793
+ "This invocation of ClearML logger's report_scalar() "
794
+ "is incorrect so we dropped this attribute."
795
+ )
796
+ continue
797
+ if step is None:
798
+ clearml_logger.report_single_value(name=k, value=v, **kwargs)
799
+ continue
800
+ title, series = ClearMLTracker._get_title_series(k)
801
+ clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)
802
+
803
+ @on_main_process
804
+ def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
805
+ """
806
+ Logs `images` to the current run.
807
+
808
+ Args:
809
+ values (`Dict[str, List[Union[np.ndarray, PIL.Image]]`):
810
+ Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
811
+ step (`int`, *optional*):
812
+ The run step. If included, the log will be affiliated with this step.
813
+ kwargs:
814
+ Additional key word arguments passed along to the `clearml.Logger.report_image` method.
815
+ """
816
+ clearml_logger = self.task.get_logger()
817
+ for k, v in values.items():
818
+ title, series = ClearMLTracker._get_title_series(k)
819
+ clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)
820
+
821
+ @on_main_process
822
+ def log_table(
823
+ self,
824
+ table_name: str,
825
+ columns: List[str] = None,
826
+ data: List[List[Any]] = None,
827
+ dataframe: Any = None,
828
+ step: Optional[int] = None,
829
+ **kwargs,
830
+ ):
831
+ """
832
+ Log a Table to the task. Can be defined either with `columns` and `data` or with `dataframe`.
833
+
834
+ Args:
835
+ table_name (`str`):
836
+ The name of the table
837
+ columns (list of `str`, *optional*):
838
+ The name of the columns on the table
839
+ data (List of List of Any data type, *optional*):
840
+ The data to be logged in the table. If `columns` is not specified, then the first entry in data will be
841
+ the name of the columns of the table
842
+ dataframe (Any data type, *optional*):
843
+ The data to be logged in the table
844
+ step (`int`, *optional*):
845
+ The run step. If included, the log will be affiliated with this step.
846
+ kwargs:
847
+ Additional key word arguments passed along to the `clearml.Logger.report_table` method.
848
+ """
849
+ to_report = dataframe
850
+ if dataframe is None:
851
+ if data is None:
852
+ raise ValueError(
853
+ "`ClearMLTracker.log_table` requires that `data` to be supplied if `dataframe` is `None`"
854
+ )
855
+ to_report = [columns] + data if columns else data
856
+ title, series = ClearMLTracker._get_title_series(table_name)
857
+ self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)
858
+
859
+ @on_main_process
860
+ def finish(self):
861
+ """
862
+ Close the ClearML task. If the task was initialized externally (e.g. by manually calling `Task.init`), this
863
+ function is a noop
864
+ """
865
+ if self.task and not self._initialized_externally:
866
+ self.task.close()
867
+
868
+ @staticmethod
869
+ def _get_title_series(name):
870
+ for prefix in ["eval", "test", "train"]:
871
+ if name.startswith(prefix + "_"):
872
+ return name[len(prefix) + 1 :], prefix
873
+ return name, "train"
874
+
875
+
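For reference, the prefix handling in `_get_title_series` above means a metric key such as `eval_loss` is reported as series `"eval"` under title `"loss"`, while an unprefixed key falls back to the `"train"` series:

```py
from accelerate.tracking import ClearMLTracker

# The static method can be exercised without a ClearML server or task.
assert ClearMLTracker._get_title_series("eval_loss") == ("loss", "eval")
assert ClearMLTracker._get_title_series("accuracy") == ("accuracy", "train")
```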
876
+ class DVCLiveTracker(GeneralTracker):
877
+ """
878
+ A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script.
879
+
880
+ Args:
881
+ run_name (`str`, *optional*):
882
+ Ignored for dvclive. See `kwargs` instead.
883
+ kwargs:
884
+ Additional key word arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live).
885
+
886
+ Example:
887
+
888
+ ```py
889
+ from accelerate import Accelerator
890
+
891
+ accelerator = Accelerator(log_with="dvclive")
892
+ accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}})
893
+ ```
894
+ """
895
+
896
+ name = "dvclive"
897
+ requires_logging_directory = False
898
+
899
+ @on_main_process
900
+ def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs):
901
+ from dvclive import Live
902
+
903
+ super().__init__()
904
+ self.live = live if live is not None else Live(**kwargs)
905
+
906
+ @property
907
+ def tracker(self):
908
+ return self.live
909
+
910
+ @on_main_process
911
+ def store_init_configuration(self, values: dict):
912
+ """
913
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
914
+ hyperparameters in a yaml file for future use.
915
+
916
+ Args:
917
+ values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types):
918
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
919
+ `str`, `float`, or `int`.
920
+ """
921
+ self.live.log_params(values)
922
+
923
+ @on_main_process
924
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
925
+ """
926
+ Logs `values` to the current run.
927
+
928
+ Args:
929
+ values (Dictionary `str` to `str`, `float`, or `int`):
930
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
931
+ step (`int`, *optional*):
932
+ The run step. If included, the log will be affiliated with this step.
933
+ kwargs:
934
+ Additional key word arguments passed along to `dvclive.Live.log_metric()`.
935
+ """
936
+ from dvclive.plots import Metric
937
+
938
+ if step is not None:
939
+ self.live.step = step
940
+ for k, v in values.items():
941
+ if Metric.could_log(v):
942
+ self.live.log_metric(k, v, **kwargs)
943
+ else:
944
+ logger.warning_once(
945
+ "Accelerator attempted to log a value of "
946
+ f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
947
+ "This invocation of DVCLive's Live.log_metric() "
948
+ "is incorrect so we dropped this attribute."
949
+ )
950
+ self.live.next_step()
951
+
952
+ @on_main_process
953
+ def finish(self):
954
+ """
955
+ Closes `dvclive.Live()`.
956
+ """
957
+ self.live.end()
958
+
959
+
960
+ LOGGER_TYPE_TO_CLASS = {
961
+ "aim": AimTracker,
962
+ "comet_ml": CometMLTracker,
963
+ "mlflow": MLflowTracker,
964
+ "tensorboard": TensorBoardTracker,
965
+ "wandb": WandBTracker,
966
+ "clearml": ClearMLTracker,
967
+ "dvclive": DVCLiveTracker,
968
+ }
969
+
970
+
971
+ def filter_trackers(
972
+ log_with: List[Union[str, LoggerType, GeneralTracker]],
973
+ logging_dir: Union[str, os.PathLike] = None,
974
+ ):
975
+ """
976
+ Takes in a list of potential tracker types and checks that:
977
+ - The tracker wanted is available in that environment
978
+ - Filters out repeats of tracker types
979
+ - If `all` is in `log_with`, will return all trackers in the environment
980
+ - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None`
981
+
982
+ Args:
983
+ log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
984
+ A list of loggers to be setup for experiment tracking. Should be one or several of:
985
+
986
+ - `"all"`
987
+ - `"tensorboard"`
988
+ - `"wandb"`
989
+ - `"comet_ml"`
990
+ - `"mlflow"`
991
+ - `"dvclive"`
992
+ If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
993
+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
994
+ logging_dir (`str`, `os.PathLike`, *optional*):
995
+ A path to a directory for storing logs of locally-compatible loggers.
996
+ """
997
+ loggers = []
998
+ if log_with is not None:
999
+ if not isinstance(log_with, (list, tuple)):
1000
+ log_with = [log_with]
1001
+ if "all" in log_with or LoggerType.ALL in log_with:
1002
+ loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()
1003
+ else:
1004
+ for log_type in log_with:
1005
+ if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):
1006
+ raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}")
1007
+ if issubclass(type(log_type), GeneralTracker):
1008
+ loggers.append(log_type)
1009
+ else:
1010
+ log_type = LoggerType(log_type)
1011
+ if log_type not in loggers:
1012
+ if log_type in get_available_trackers():
1013
+ tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)]
1014
+ if tracker_init.requires_logging_directory:
1015
+ if logging_dir is None:
1016
+ raise ValueError(
1017
+ f"Logging with `{log_type}` requires a `logging_dir` to be passed in."
1018
+ )
1019
+ loggers.append(log_type)
1020
+ else:
1021
+ logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.")
1022
+
1023
+ return loggers
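A small sketch of how `filter_trackers` resolves a `log_with` list; the exact result depends on which tracking packages are installed in the environment:

```py
from accelerate.tracking import filter_trackers

# With tensorboard installed this returns [LoggerType.TENSORBOARD]; otherwise it is filtered out
# with a debug message. A `logging_dir` is required because TensorBoard logs to disk.
print(filter_trackers(["tensorboard"], logging_dir="tb_logs"))

# "all" expands to every tracker whose package was detected at import time.
print(filter_trackers(["all"], logging_dir="tb_logs"))
```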
venv/lib/python3.10/site-packages/accelerate/utils/bnb.py ADDED
@@ -0,0 +1,467 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import logging
17
+ import os
18
+ from copy import deepcopy
19
+ from typing import Dict, List, Optional, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+
24
+ from accelerate.utils.imports import (
25
+ is_4bit_bnb_available,
26
+ is_8bit_bnb_available,
27
+ )
28
+
29
+ from ..big_modeling import dispatch_model, init_empty_weights
30
+ from .dataclasses import BnbQuantizationConfig
31
+ from .modeling import (
32
+ find_tied_parameters,
33
+ get_balanced_memory,
34
+ infer_auto_device_map,
35
+ load_checkpoint_in_model,
36
+ offload_weight,
37
+ set_module_tensor_to_device,
38
+ )
39
+
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+
44
+ def load_and_quantize_model(
45
+ model: torch.nn.Module,
46
+ bnb_quantization_config: BnbQuantizationConfig,
47
+ weights_location: Union[str, os.PathLike] = None,
48
+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
49
+ no_split_module_classes: Optional[List[str]] = None,
50
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
51
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
52
+ offload_state_dict: bool = False,
53
+ ):
54
+ """
55
+ This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the
56
+ model is on the meta device, we will load and dispatch the weights according to the `device_map` passed. If the
57
+ model is already loaded, we will quantize the model and put it on the GPU.
58
+
59
+ Args:
60
+ model (`torch.nn.Module`):
61
+ Input model. The model can be already loaded or on the meta device
62
+ bnb_quantization_config (`BnbQuantizationConfig`):
63
+ The bitsandbytes quantization parameters
64
+ weights_location (`str` or `os.PathLike`):
65
+ The location of the weights to load. It can be:
66
+ - a path to a file containing a whole model state dict
67
+ - a path to a `.json` file containing the index to a sharded checkpoint
68
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
69
+ - a path to a folder containing a unique pytorch_model.bin file.
70
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
71
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
72
+ name, once a given module name is inside, every submodule of it will be sent to the same device.
73
+ no_split_module_classes (`List[str]`, *optional*):
74
+ A list of layer class names that should never be split across device (for instance any layer that has a
75
+ residual connection).
76
+ max_memory (`Dict`, *optional*):
77
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
78
+ offload_folder (`str` or `os.PathLike`, *optional*):
79
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
80
+ offload_state_dict (`bool`, *optional*, defaults to `False`):
81
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
82
+ the weight of the CPU state dict + the biggest shard does not fit.
83
+
84
+ Returns:
85
+ `torch.nn.Module`: The quantized model
86
+ """
87
+
88
+ load_in_4bit = bnb_quantization_config.load_in_4bit
89
+ load_in_8bit = bnb_quantization_config.load_in_8bit
90
+
91
+ if load_in_8bit and not is_8bit_bnb_available():
92
+ raise ImportError(
93
+ "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
94
+ " make sure you have the latest version of `bitsandbytes` installed."
95
+ )
96
+ if load_in_4bit and not is_4bit_bnb_available():
97
+ raise ValueError(
98
+ "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
99
+ "make sure you have the latest version of `bitsandbytes` installed."
100
+ )
101
+
102
+ modules_on_cpu = []
103
+ # custom device map
104
+ if isinstance(device_map, dict) and len(device_map.keys()) > 1:
105
+ modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
106
+
107
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
108
+ if bnb_quantization_config.skip_modules is None:
109
+ bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
110
+
111
+ # add cpu modules to skip modules only for 4-bit modules
112
+ if load_in_4bit:
113
+ bnb_quantization_config.skip_modules.extend(modules_on_cpu)
114
+ modules_to_not_convert = bnb_quantization_config.skip_modules
115
+
116
+ # We add the modules we want to keep in full precision
117
+ if bnb_quantization_config.keep_in_fp32_modules is None:
118
+ bnb_quantization_config.keep_in_fp32_modules = []
119
+ keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
120
+ modules_to_not_convert.extend(keep_in_fp32_modules)
121
+
122
+ # compatibility with peft
123
+ model.is_loaded_in_4bit = load_in_4bit
124
+ model.is_loaded_in_8bit = load_in_8bit
125
+
126
+ model_device = get_parameter_device(model)
127
+ if model_device.type != "meta":
128
+ # quantization of an already loaded model
129
+ logger.warning(
130
+ "It is not recommended to quantize a loaded model. "
131
+ "The model should be instantiated under the `init_empty_weights` context manager."
132
+ )
133
+ model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
134
+ # convert param to the right dtype
135
+ dtype = bnb_quantization_config.torch_dtype
136
+ for name, param in model.state_dict().items():
137
+ if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
138
+ param.to(torch.float32)
139
+ if param.dtype != torch.float32:
140
+ name = name.replace(".weight", "").replace(".bias", "")
141
+ param = getattr(model, name, None)
142
+ if param is not None:
143
+ param.to(torch.float32)
144
+ elif torch.is_floating_point(param):
145
+ param.to(dtype)
146
+ if model_device.type == "cuda":
147
+ # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
148
+ model.cuda(torch.cuda.current_device())
149
+ torch.cuda.empty_cache()
150
+ elif torch.cuda.is_available():
151
+ model.to(torch.cuda.current_device())
152
+ else:
153
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
154
+ logger.info(
155
+ f"The model device type is {model_device.type}. However, cuda is needed for quantization."
156
+ "We move the model to cuda."
157
+ )
158
+ return model
159
+
160
+ elif weights_location is None:
161
+ raise RuntimeError(
162
+ f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
163
+ )
164
+
165
+ else:
166
+ with init_empty_weights():
167
+ model = replace_with_bnb_layers(
168
+ model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
169
+ )
170
+
171
+ device_map = get_quantized_model_device_map(
172
+ model,
173
+ bnb_quantization_config,
174
+ device_map,
175
+ max_memory=max_memory,
176
+ no_split_module_classes=no_split_module_classes,
177
+ )
178
+ if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
179
+ offload_state_dict = True
180
+
181
+ offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
182
+
183
+ load_checkpoint_in_model(
184
+ model,
185
+ weights_location,
186
+ device_map,
187
+ dtype=bnb_quantization_config.torch_dtype,
188
+ offload_folder=offload_folder,
189
+ offload_state_dict=offload_state_dict,
190
+ keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
191
+ offload_8bit_bnb=load_in_8bit and offload,
192
+ )
193
+ return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
194
+
195
+
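A hedged end-to-end sketch of the entry point above (the model id and checkpoint path are placeholders; a CUDA GPU, `bitsandbytes`, and `transformers` are assumed to be available):

```py
import torch
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("some-org/some-model")      # placeholder model id
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)      # weights stay on the meta device

bnb_config = BnbQuantizationConfig(load_in_8bit=True, torch_dtype=torch.float16)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",                      # placeholder weights folder
    device_map="auto",
)
```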
196
+ def get_quantized_model_device_map(
197
+ model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
198
+ ):
199
+ if device_map is None:
200
+ if torch.cuda.is_available():
201
+ device_map = {"": torch.cuda.current_device()}
202
+ else:
203
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
204
+ logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
205
+
206
+ if isinstance(device_map, str):
207
+ if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
208
+ raise ValueError(
209
+ "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
210
+ "'sequential'."
211
+ )
212
+
213
+ special_dtypes = {}
214
+ special_dtypes.update(
215
+ {
216
+ name: bnb_quantization_config.torch_dtype
217
+ for name, _ in model.named_parameters()
218
+ if any(m in name for m in bnb_quantization_config.skip_modules)
219
+ }
220
+ )
221
+ special_dtypes.update(
222
+ {
223
+ name: torch.float32
224
+ for name, _ in model.named_parameters()
225
+ if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
226
+ }
227
+ )
228
+
229
+ kwargs = {}
230
+ kwargs["special_dtypes"] = special_dtypes
231
+ kwargs["no_split_module_classes"] = no_split_module_classes
232
+ kwargs["dtype"] = bnb_quantization_config.target_dtype
233
+
234
+ # get max_memory for each device.
235
+ if device_map != "sequential":
236
+ max_memory = get_balanced_memory(
237
+ model,
238
+ low_zero=(device_map == "balanced_low_0"),
239
+ max_memory=max_memory,
240
+ **kwargs,
241
+ )
242
+
243
+ kwargs["max_memory"] = max_memory
244
+ device_map = infer_auto_device_map(model, **kwargs)
245
+
246
+ if isinstance(device_map, dict):
247
+ # check that we don't have any quantized modules on the CPU
248
+ modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
249
+
250
+ device_map_without_some_modules = {
251
+ key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
252
+ }
253
+ for device in ["cpu", "disk"]:
254
+ if device in device_map_without_some_modules.values():
255
+ if bnb_quantization_config.load_in_4bit:
256
+ raise ValueError(
257
+ """
258
+ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
259
+ the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
260
+ these modules in `torch_dtype`, you need to pass a custom `device_map` to
261
+ `load_and_quantize_model`. Check
262
+ https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
263
+ for more details.
264
+ """
265
+ )
266
+ else:
267
+ logger.info(
268
+ "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
269
+ )
270
+ del device_map_without_some_modules
271
+ return device_map
272
+
273
+
274
+ def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
275
+ """
276
+ A helper function to replace all `torch.nn.Linear` modules with `bnb.nn.Linear8bitLt` or `bnb.nn.Linear4bit`
278
+ modules from the `bitsandbytes` library. The function is run recursively and replaces `torch.nn.Linear` modules.
278
+
279
+ Parameters:
280
+ model (`torch.nn.Module`):
281
+ Input model or `torch.nn.Module` as the function is run recursively.
282
+ modules_to_not_convert (`List[str]`):
283
+ Names of the modules to not convert. In practice we keep the `lm_head` in full precision for
284
+ numerical stability reasons.
285
+ current_key_name (`List[str]`, *optional*):
286
+ An array to track the current key of the recursion. This is used to check whether the current key (part of
287
+ it) is not in the list of modules to not convert.
288
+ """
289
+
290
+ if modules_to_not_convert is None:
291
+ modules_to_not_convert = []
292
+
293
+ model, has_been_replaced = _replace_with_bnb_layers(
294
+ model, bnb_quantization_config, modules_to_not_convert, current_key_name
295
+ )
296
+ if not has_been_replaced:
297
+ logger.warning(
298
+ "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
299
+ " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
300
+ " Please double check your model architecture, or submit an issue on github if you think this is"
301
+ " a bug."
302
+ )
303
+ return model
304
+
305
+
306
+ def _replace_with_bnb_layers(
307
+ model,
308
+ bnb_quantization_config,
309
+ modules_to_not_convert=None,
310
+ current_key_name=None,
311
+ ):
312
+ """
313
+ Private method that wraps the recursion for module replacement.
314
+
315
+ Returns the converted model and a boolean that indicates if the conversion has been successful or not.
316
+ """
317
+ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
318
+ import bitsandbytes as bnb
319
+
320
+ has_been_replaced = False
321
+ for name, module in model.named_children():
322
+ if current_key_name is None:
323
+ current_key_name = []
324
+ current_key_name.append(name)
325
+ if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
326
+ # Check if the current key is not in the `modules_to_not_convert`
327
+ current_key_name_str = ".".join(current_key_name)
328
+ proceed = True
329
+ for key in modules_to_not_convert:
330
+ if (
331
+ (key in current_key_name_str) and (key + "." in current_key_name_str)
332
+ ) or key == current_key_name_str:
333
+ proceed = False
334
+ break
335
+ if proceed:
336
+ # Load bnb module with empty weights and replace the `nn.Linear` module
337
+ if bnb_quantization_config.load_in_8bit:
338
+ bnb_module = bnb.nn.Linear8bitLt(
339
+ module.in_features,
340
+ module.out_features,
341
+ module.bias is not None,
342
+ has_fp16_weights=False,
343
+ threshold=bnb_quantization_config.llm_int8_threshold,
344
+ )
345
+ elif bnb_quantization_config.load_in_4bit:
346
+ bnb_module = bnb.nn.Linear4bit(
347
+ module.in_features,
348
+ module.out_features,
349
+ module.bias is not None,
350
+ bnb_quantization_config.bnb_4bit_compute_dtype,
351
+ compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
352
+ quant_type=bnb_quantization_config.bnb_4bit_quant_type,
353
+ )
354
+ else:
355
+ raise ValueError("load_in_8bit and load_in_4bit can't be both False")
356
+ bnb_module.weight.data = module.weight.data
357
+ if module.bias is not None:
358
+ bnb_module.bias.data = module.bias.data
359
+ bnb_module.requires_grad_(False)
360
+ setattr(model, name, bnb_module)
361
+ has_been_replaced = True
362
+ if len(list(module.children())) > 0:
363
+ _, _has_been_replaced = _replace_with_bnb_layers(
364
+ module, bnb_quantization_config, modules_to_not_convert, current_key_name
365
+ )
366
+ has_been_replaced = has_been_replaced | _has_been_replaced
367
+ # Remove the last key for recursion
368
+ current_key_name.pop(-1)
369
+ return model, has_been_replaced
370
+
371
+
372
+ def get_keys_to_not_convert(model):
373
+ r"""
374
+ A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM modules
375
+ we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want
376
+ to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in
377
+ int8.
378
+
379
+ Parameters:
380
+ model (`torch.nn.Module`):
381
+ Input model
382
+ """
383
+ # Create a copy of the model
384
+ with init_empty_weights():
385
+ tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
386
+
387
+ tied_params = find_tied_parameters(tied_model)
388
+ # For compatibility with Accelerate < 0.18
389
+ if isinstance(tied_params, dict):
390
+ tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
391
+ else:
392
+ tied_keys = sum(tied_params, [])
393
+ has_tied_params = len(tied_keys) > 0
394
+
395
+ # Check if it is a base model
396
+ is_base_model = False
397
+ if hasattr(model, "base_model_prefix"):
398
+ is_base_model = not hasattr(model, model.base_model_prefix)
399
+
400
+ # Ignore this for base models (BertModel, GPT2Model, etc.)
401
+ if (not has_tied_params) and is_base_model:
402
+ return []
403
+
404
+ # otherwise they have an attached head
405
+ list_modules = list(model.named_children())
406
+ list_last_module = [list_modules[-1][0]]
407
+
408
+ # add last module together with tied weights
409
+ intersection = set(list_last_module) - set(tied_keys)
410
+ list_untouched = list(set(tied_keys)) + list(intersection)
411
+
412
+ # remove ".weight" from the keys
413
+ names_to_remove = [".weight", ".bias"]
414
+ filtered_module_names = []
415
+ for name in list_untouched:
416
+ for name_to_remove in names_to_remove:
417
+ if name_to_remove in name:
418
+ name = name.replace(name_to_remove, "")
419
+ filtered_module_names.append(name)
420
+
421
+ return filtered_module_names
422
+
423
+
424
+ def has_4bit_bnb_layers(model):
425
+ """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model"""
426
+ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
427
+ import bitsandbytes as bnb
428
+
429
+ for m in model.modules():
430
+ if isinstance(m, bnb.nn.Linear4bit):
431
+ return True
432
+ return False
433
+
434
+
435
+ def get_parameter_device(parameter: nn.Module):
436
+ return next(parameter.parameters()).device
437
+
438
+
439
+ def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
440
+ # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
441
+ if fp16_statistics is None:
442
+ set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
443
+ tensor_name = param_name
444
+ module = model
445
+ if "." in tensor_name:
446
+ splits = tensor_name.split(".")
447
+ for split in splits[:-1]:
448
+ new_module = getattr(module, split)
449
+ if new_module is None:
450
+ raise ValueError(f"{module} has no attribute {split}.")
451
+ module = new_module
452
+ tensor_name = splits[-1]
453
+ # offload weights
454
+ module._parameters[tensor_name].requires_grad = False
455
+ offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
456
+ if hasattr(module._parameters[tensor_name], "SCB"):
457
+ offload_weight(
458
+ module._parameters[tensor_name].SCB,
459
+ param_name.replace("weight", "SCB"),
460
+ offload_folder,
461
+ index=offload_index,
462
+ )
463
+ else:
464
+ offload_weight(param, param_name, offload_folder, index=offload_index)
465
+ offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
466
+
467
+ set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
venv/lib/python3.10/site-packages/accelerate/utils/dataclasses.py ADDED
@@ -0,0 +1,1717 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ General namespace and dataclass related classes
17
+ """
18
+
19
+ import argparse
20
+ import copy
21
+ import enum
22
+ import functools
23
+ import os
24
+ import typing
25
+ import warnings
26
+ from contextlib import contextmanager
27
+ from dataclasses import dataclass, field
28
+ from datetime import timedelta
29
+ from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, get_args
30
+
31
+ import torch
32
+
33
+ from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE
34
+ from .environment import str_to_bool
35
+ from .imports import is_cuda_available, is_npu_available, is_xpu_available
36
+ from .versions import compare_versions
37
+
38
+
39
+ class KwargsHandler:
40
+ """
41
+ Internal mixin that implements a `to_kwargs()` method for a dataclass.
42
+ """
43
+
44
+ def to_dict(self):
45
+ return copy.deepcopy(self.__dict__)
46
+
47
+ def to_kwargs(self):
48
+ """
49
+ Returns a dictionary containing the attributes with values different from the default of this class.
50
+ """
51
+ # import clear_environment here to avoid circular import problem
52
+ from .other import clear_environment
53
+
54
+ with clear_environment():
55
+ default_dict = self.__class__().to_dict()
56
+ this_dict = self.to_dict()
57
+ return {k: v for k, v in this_dict.items() if default_dict[k] != v}
58
+
59
+
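To make the diffing behaviour of `to_kwargs()` concrete, here is a minimal sketch; the `ExampleKwargs` dataclass is hypothetical and exists only to show that fields left at their class defaults are dropped from the returned dictionary:

```python
from dataclasses import dataclass

from accelerate.utils.dataclasses import KwargsHandler


@dataclass
class ExampleKwargs(KwargsHandler):  # hypothetical subclass, for illustration only
    find_unused_parameters: bool = False
    bucket_cap_mb: int = 25


# Only fields that differ from the class defaults are returned.
print(ExampleKwargs(find_unused_parameters=True).to_kwargs())
# {'find_unused_parameters': True}
```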
60
+ @dataclass
61
+ class AutocastKwargs(KwargsHandler):
62
+ """
63
+ Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the
64
+ documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more
65
+ information on each argument.
66
+
67
+ Example:
68
+
69
+ ```python
70
+ from accelerate import Accelerator
71
+ from accelerate.utils import AutocastKwargs
72
+
73
+ kwargs = AutocastKwargs(cache_enabled=True)
74
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
75
+ ```
76
+ """
77
+
78
+ enabled: bool = True
79
+ cache_enabled: bool = None
80
+
81
+
82
+ @dataclass
83
+ class DistributedDataParallelKwargs(KwargsHandler):
84
+ """
85
+ Use this object in your [`Accelerator`] to customize how your model is wrapped in a
86
+ `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
87
+ [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
88
+ information on each argument.
89
+
90
+ <Tip warning={true}>
91
+
92
+ `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
93
+
94
+ `static_graph` is only available in PyTorch 1.11.0 and later versions.
95
+
96
+ </Tip>
97
+
98
+ Example:
99
+
100
+ ```python
101
+ from accelerate import Accelerator
102
+ from accelerate.utils import DistributedDataParallelKwargs
103
+
104
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
105
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
106
+ ```
107
+ """
108
+
109
+ dim: int = 0
110
+ broadcast_buffers: bool = True
111
+ bucket_cap_mb: int = 25
112
+ find_unused_parameters: bool = False
113
+ check_reduction: bool = False
114
+ gradient_as_bucket_view: bool = False
115
+ static_graph: bool = False
116
+
117
+
118
+ @dataclass
119
+ class GradScalerKwargs(KwargsHandler):
120
+ """
121
+ Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
122
+ `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
123
+ [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
124
+
125
+ <Tip warning={true}>
126
+
127
+ `GradScaler` is only available in PyTorch 1.5.0 and later versions.
128
+
129
+ </Tip>
130
+
131
+ Example:
132
+
133
+ ```python
134
+ from accelerate import Accelerator
135
+ from accelerate.utils import GradScalerKwargs
136
+
137
+ kwargs = GradScalerKwargs(backoff_filter=0.25)
138
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
139
+ ```
140
+ """
141
+
142
+ init_scale: float = 65536.0
143
+ growth_factor: float = 2.0
144
+ backoff_factor: float = 0.5
145
+ growth_interval: int = 2000
146
+ enabled: bool = True
147
+
148
+
149
+ @dataclass
150
+ class InitProcessGroupKwargs(KwargsHandler):
151
+ """
152
+ Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer
153
+ to the documentation of this
154
+ [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
155
+ information on each argument.
156
+
157
+ ```python
158
+ from datetime import timedelta
159
+ from accelerate import Accelerator
160
+ from accelerate.utils import InitProcessGroupKwargs
161
+
162
+ kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
163
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
164
+ ```
165
+ """
166
+
167
+ backend: Optional[str] = "nccl"
168
+ init_method: Optional[str] = None
169
+ timeout: timedelta = timedelta(seconds=1800)
170
+
171
+
172
+ # Literals
173
+ Backend = Literal["MSAMP", "TE"]
174
+ OptLevel = Literal["O1", "O2"]
175
+ FP8Format = Literal["E4M3", "HYBRID"]
176
+ AmaxComputeAlgorithm = Literal["max", "most_recent"]
177
+
178
+
179
+ @dataclass
180
+ class FP8RecipeKwargs(KwargsHandler):
181
+ """
182
+ Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
183
+ training with `transformer-engine` or `ms-amp`.
184
+
185
+ <Tip>
186
+
187
+ For more information on `transformer-engine` args, please refer to the API
188
+ [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html).
189
+
190
+ For more information on the `ms-amp` args, please refer to the Optimization Level
191
+ [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level).
192
+
193
+ </Tip>
194
+
195
+ ```python
196
+ from accelerate import Accelerator
197
+ from accelerate.utils import FP8RecipeKwargs
198
+
199
+ kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID")
200
+ accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
201
+ ```
202
+
203
+ To use MS-AMP as an engine, pass `backend="msamp"` and the `optimization_level`:
204
+
205
+ ```python
206
+ kwargs = FP8RecipeKwargs(backend="msamp", optimization_level="02")
207
+ ```
208
+
209
+ Args:
210
+ backend (`str`, *optional*, defaults to "msamp"):
211
+ Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine).
212
+ margin (`int`, *optional*, default to 0):
213
+ The margin to use for the gradient scaling.
214
+ interval (`int`, *optional*, default to 1):
215
+ The interval to use for how often the scaling factor is recomputed.
216
+ fp8_format (`str`, *optional*, default to "E4M3"):
217
+ The format to use for the FP8 recipe. Must be one of `E4M3` or `HYBRID`.
218
+ amax_history_len (`int`, *optional*, default to 1024):
219
+ The length of the history to use for the scaling factor computation
220
+ amax_compute_algo (`str`, *optional*, default to "most_recent"):
221
+ The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
222
+ override_linear_precision (`tuple` of three `bool`, *optional*, default to `(False, False, False)`):
223
+ Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
224
+ opt_level (`str`), one of `O1` or `O2` (default is `O2`):
225
+ What level of 8-bit collective communication should be used with MS-AMP. In general:
226
+ * O1: Weight gradients and `all_reduce` communications are done in fp8, reducing GPU
227
+ memory usage and communication bandwidth
228
+ * O2: First-order optimizer states are in 8-bit, and second order states are in FP16.
229
+ Only available when using Adam or AdamW. This maintains accuracy and can potentially save the
230
+ highest memory.
231
+ * O3: Specifically for DeepSpeed, implements capabilities so weights and master weights of models
232
+ are stored in FP8. If `fp8` is selected and deepspeed is enabled, will be used by default. (Not
233
+ available currently).
234
+ """
235
+
236
+ backend: Backend = "MSAMP"
237
+ opt_level: OptLevel = "O2"
238
+ margin: int = 0
239
+ interval: int = 1
240
+ fp8_format: FP8Format = "E4M3"
241
+ amax_history_len: int = 1
242
+ amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
243
+ override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
244
+
245
+ def __post_init__(self):
246
+ if self.backend.upper() not in get_args(Backend):
247
+ raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine).")
248
+
249
+ self.backend = self.backend.upper()
250
+ # Check TE args
251
+ if self.backend == "TE":
252
+ self.fp8_format = self.fp8_format.upper()
253
+ if self.fp8_format not in get_args(FP8Format):
254
+ raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.")
255
+ if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm):
256
+ raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}")
257
+ elif self.backend == "MSAMP":
258
+ if self.opt_level not in get_args(OptLevel):
259
+ raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
260
+
261
+
262
+ class EnumWithContains(enum.EnumMeta):
263
+ "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
264
+
265
+ def __contains__(cls, item):
266
+ try:
267
+ cls(item)
268
+ except ValueError:
269
+ return False
270
+ return True
271
+
272
+
273
+ class BaseEnum(enum.Enum, metaclass=EnumWithContains):
274
+ "An enum class that can get the value of an item with `str(Enum.key)`"
275
+
276
+ def __str__(self):
277
+ return self.value
278
+
279
+ @classmethod
280
+ def list(cls):
281
+ "Method to list all the possible items in `cls`"
282
+ return list(map(str, cls))
283
+
284
+
285
+ class DeprecatedFieldDescriptor:
286
+ """
287
+ Descriptor for deprecated fields in an enum class.
288
+
289
+ Args:
290
+ field_name (`str`):
291
+ The name of the deprecated field.
292
+ replaced_with (`str`):
293
+ The name of the field that replaces the deprecated one.
294
+ """
295
+
296
+ def __init__(self, field_name, replaced_with):
297
+ self.field_name = field_name
298
+ self.replaced_with = replaced_with
299
+
300
+ def __get__(self, instance, owner):
301
+ warnings.warn(
302
+ f"The `{self.field_name}` of `{owner}` is deprecated and will be removed in v1.0.0. "
303
+ f"Please use the `{self.replaced_with}` instead.",
304
+ FutureWarning,
305
+ )
306
+ return getattr(owner, self.replaced_with)
307
+
308
+
309
+ class DistributedType(str, enum.Enum):
310
+ """
311
+ Represents a type of distributed environment.
312
+
313
+ Values:
314
+
315
+ - **NO** -- Not a distributed environment, just a single process.
316
+ - **MULTI_CPU** -- Distributed on multiple CPU nodes.
317
+ - **MULTI_GPU** -- Distributed on multiple GPUs.
318
+ - **MULTI_MLU** -- Distributed on multiple MLUs.
319
+ - **MULTI_NPU** -- Distributed on multiple NPUs.
320
+ - **MULTI_XPU** -- Distributed on multiple XPUs.
321
+ - **DEEPSPEED** -- Using DeepSpeed.
322
+ - **XLA** -- Using TorchXLA.
323
+ - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead.
324
+ """
325
+
326
+ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.
327
+ NO = "NO"
328
+ MULTI_CPU = "MULTI_CPU"
329
+ MULTI_GPU = "MULTI_GPU"
330
+ MULTI_NPU = "MULTI_NPU"
331
+ MULTI_MLU = "MULTI_MLU"
332
+ MULTI_XPU = "MULTI_XPU"
333
+ DEEPSPEED = "DEEPSPEED"
334
+ FSDP = "FSDP"
335
+ XLA = "XLA"
336
+ MEGATRON_LM = "MEGATRON_LM"
337
+ TPU = DeprecatedFieldDescriptor("TPU", "XLA")
338
+
339
+
340
+ class SageMakerDistributedType(str, enum.Enum):
341
+ """
342
+ Represents a type of distributed environment.
343
+
344
+ Values:
345
+
346
+ - **NO** -- Not a distributed environment, just a single process.
347
+ - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
348
+ - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
349
+ """
350
+
351
+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
352
+ NO = "NO"
353
+ DATA_PARALLEL = "DATA_PARALLEL"
354
+ MODEL_PARALLEL = "MODEL_PARALLEL"
355
+
356
+
357
+ class ComputeEnvironment(str, enum.Enum):
358
+ """
359
+ Represents a type of the compute environment.
360
+
361
+ Values:
362
+
363
+ - **LOCAL_MACHINE** -- private/custom cluster hardware.
364
+ - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
365
+ """
366
+
367
+ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
368
+ LOCAL_MACHINE = "LOCAL_MACHINE"
369
+ AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
370
+
371
+
372
+ class DynamoBackend(str, BaseEnum):
373
+ """
374
+ Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html).
375
+
376
+ Values:
377
+
378
+ - **NO** -- Do not use torch dynamo.
379
+ - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
380
+ issues.
381
+ - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's
382
+ extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.
383
+ - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
384
+ kernels. [Read
385
+ more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
386
+ - **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
387
+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
388
+ - **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
389
+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
390
+ - **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
391
+ - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
392
+ more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
393
+ - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
394
+ more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
395
+ - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
396
+ - **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
397
+ more](https://github.com/onnx/onnx-tensorrt)
398
+ - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
399
+ more](https://github.com/intel/intel-extension-for-pytorch).
400
+ - **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
401
+
402
+ """
403
+
404
+ # Subclassing str as well as Enum allows the `DynamoBackend` to be JSON-serializable out of the box.
405
+ NO = "NO"
406
+ EAGER = "EAGER"
407
+ AOT_EAGER = "AOT_EAGER"
408
+ INDUCTOR = "INDUCTOR"
409
+ AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
410
+ NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
411
+ CUDAGRAPHS = "CUDAGRAPHS"
412
+ OFI = "OFI"
413
+ FX2TRT = "FX2TRT"
414
+ ONNXRT = "ONNXRT"
415
+ TENSORRT = "TENSORRT"
416
+ IPEX = "IPEX"
417
+ TVM = "TVM"
418
+
419
+
420
+ class LoggerType(BaseEnum):
421
+ """Represents a type of supported experiment tracker
422
+
423
+ Values:
424
+
425
+ - **ALL** -- all available trackers in the environment that are supported
426
+ - **TENSORBOARD** -- TensorBoard as an experiment tracker
427
+ - **WANDB** -- wandb as an experiment tracker
428
+ - **COMETML** -- comet_ml as an experiment tracker
429
+ - **DVCLIVE** -- dvclive as an experiment tracker
430
+ """
431
+
432
+ ALL = "all"
433
+ AIM = "aim"
434
+ TENSORBOARD = "tensorboard"
435
+ WANDB = "wandb"
436
+ COMETML = "comet_ml"
437
+ MLFLOW = "mlflow"
438
+ CLEARML = "clearml"
439
+ DVCLIVE = "dvclive"
440
+
441
+
442
+ class PrecisionType(BaseEnum):
443
+ """Represents a type of precision used on floating point values
444
+
445
+ Values:
446
+
447
+ - **NO** -- using full precision (FP32)
448
+ - **FP16** -- using half precision
449
+ - **BF16** -- using brain floating point precision
450
+ """
451
+
452
+ NO = "no"
453
+ FP8 = "fp8"
454
+ FP16 = "fp16"
455
+ BF16 = "bf16"
456
+
457
+
458
+ class RNGType(BaseEnum):
459
+ TORCH = "torch"
460
+ CUDA = "cuda"
461
+ MLU = "mlu"
462
+ NPU = "npu"
463
+ XLA = "xla"
464
+ XPU = "xpu"
465
+ GENERATOR = "generator"
466
+
467
+
468
+ class CustomDtype(enum.Enum):
469
+ r"""
470
+ An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
471
+ """
472
+
473
+ FP8 = "fp8"
474
+ INT4 = "int4"
475
+ INT2 = "int2"
476
+
477
+
478
+ # data classes
479
+
480
+
481
+ @dataclass
482
+ class TensorInformation:
483
+ shape: torch.Size
484
+ dtype: torch.dtype
485
+
486
+
487
+ @dataclass
488
+ class DataLoaderConfiguration:
489
+ """
490
+ Configuration for dataloader-related items when calling `accelerator.prepare`.
491
+ """
492
+
493
+ split_batches: bool = field(
494
+ default=False,
495
+ metadata={
496
+ "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
497
+ " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a"
498
+ " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set"
499
+ " in your script multiplied by the number of processes."
500
+ },
501
+ )
502
+ dispatch_batches: bool = field(
503
+ default=None,
504
+ metadata={
505
+ "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
506
+ " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose"
507
+ " underlying dataset is an `IterableDataslet`, `False` otherwise."
508
+ },
509
+ )
510
+ even_batches: bool = field(
511
+ default=True,
512
+ metadata={
513
+ "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
514
+ " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
515
+ " all workers."
516
+ },
517
+ )
518
+ use_seedable_sampler: bool = field(
519
+ default=False,
520
+ metadata={
521
+ "help": "Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])."
522
+ "Ensures training results are fully reproducable using a different sampling technique. "
523
+ "While seed-to-seed results may differ, on average the differences are neglible when using"
524
+ "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results."
525
+ },
526
+ )
527
+
528
+
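As a usage sketch (assuming an accelerate version where `DataLoaderConfiguration` is exposed in `accelerate.utils` and `Accelerator` accepts a `dataloader_config` argument), the configuration is built once and handed to the `Accelerator`, which then applies it to every dataloader passed through `prepare`:

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

# Keep the per-process batch size set in the script and use the seedable sampler
# so that shuffling is reproducible across runs (pair with `set_seed`).
dataloader_config = DataLoaderConfiguration(split_batches=False, use_seedable_sampler=True)
accelerator = Accelerator(dataloader_config=dataloader_config)
```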
529
+ @dataclass
530
+ class ProjectConfiguration:
531
+ """
532
+ Configuration for the Accelerator object based on inner-project needs.
533
+ """
534
+
535
+ project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."})
536
+ logging_dir: str = field(
537
+ default=None,
538
+ metadata={
539
+ "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`."
540
+ },
541
+ )
542
+ automatic_checkpoint_naming: bool = field(
543
+ default=False,
544
+ metadata={"help": "Whether saved states should be automatically iteratively named."},
545
+ )
546
+
547
+ total_limit: int = field(
548
+ default=None,
549
+ metadata={"help": "The maximum number of total saved states to keep."},
550
+ )
551
+
552
+ iteration: int = field(
553
+ default=0,
554
+ metadata={"help": "The current save iteration."},
555
+ )
556
+
557
+ save_on_each_node: bool = field(
558
+ default=False,
559
+ metadata={
560
+ "help": (
561
+ "When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
562
+ " only on the main one"
563
+ )
564
+ },
565
+ )
566
+
567
+ def set_directories(self, project_dir: str = None):
568
+ "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
569
+ self.project_dir = project_dir
570
+ if self.logging_dir is None:
571
+ self.logging_dir = project_dir
572
+
573
+ def __post_init__(self):
574
+ self.set_directories(self.project_dir)
575
+
576
+
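A short, hedged sketch of how this configuration is typically wired into an `Accelerator` (the `./output` path is only an example):

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Checkpoints are written under `project_dir` with iteration-based names when
# `automatic_checkpoint_naming` is enabled; only the last `total_limit` are kept.
project_config = ProjectConfiguration(
    project_dir="./output",
    automatic_checkpoint_naming=True,
    total_limit=3,
)
accelerator = Accelerator(project_config=project_config)
accelerator.save_state()  # uses the automatic naming scheme
```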
577
+ @dataclass
578
+ class GradientAccumulationPlugin(KwargsHandler):
579
+ """
580
+ A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or
581
+ `gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error.
582
+
583
+ Parameters:
584
+ num_steps (`int`):
585
+ The number of steps to accumulate gradients for.
586
+ adjust_scheduler (`bool`, *optional*, defaults to `True`):
587
+ Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be
588
+ `True` if the used scheduler was not adjusted for gradient accumulation.
589
+ sync_with_dataloader (`bool`, *optional*, defaults to `True`):
590
+ Whether to synchronize setting the gradients when at the end of the dataloader.
591
+ sync_each_batch (`bool`, *optional*):
592
+ Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory
593
+ requirements when using gradient accumulation with distributed training, at expense of speed.
594
+
595
+ Example:
596
+
597
+ ```python
598
+ from accelerate.utils import GradientAccumulationPlugin
599
+
600
+ gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
601
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
602
+ ```
603
+ """
604
+
605
+ num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."})
606
+ adjust_scheduler: bool = field(
607
+ default=True,
608
+ metadata={
609
+ "help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation."
610
+ },
611
+ )
612
+ sync_with_dataloader: bool = field(
613
+ default=True,
614
+ metadata={
615
+ "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
616
+ },
617
+ )
618
+ sync_each_batch: bool = field(
619
+ default=False,
620
+ metadata={
621
+ "help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed."
622
+ },
623
+ )
624
+
625
+
626
+ @dataclass
627
+ class TorchDynamoPlugin(KwargsHandler):
628
+ """
629
+ This plugin is used to compile a model with PyTorch 2.0
630
+ """
631
+
632
+ backend: DynamoBackend = field(
633
+ default=None,
634
+ metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
635
+ )
636
+ mode: str = field(
637
+ default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"}
638
+ )
639
+ fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"})
640
+ dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
641
+ options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
642
+ disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
643
+
644
+ def __post_init__(self):
645
+ prefix = "ACCELERATE_DYNAMO_"
646
+ if self.backend is None:
647
+ self.backend = os.environ.get(prefix + "BACKEND", "no")
648
+ self.backend = DynamoBackend(self.backend.upper())
649
+ if self.mode is None:
650
+ self.mode = os.environ.get(prefix + "MODE", "default")
651
+ if self.fullgraph is None:
652
+ self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
653
+ if self.dynamic is None:
654
+ self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
655
+
656
+ def to_dict(self):
657
+ dynamo_config = copy.deepcopy(self.__dict__)
658
+ dynamo_config["backend"] = dynamo_config["backend"].value.lower()
659
+ return dynamo_config
660
+
661
+
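As `__post_init__` above shows, this plugin is normally populated from the `ACCELERATE_DYNAMO_*` environment variables written by `accelerate config`. In a script, the same effect can be obtained by requesting a dynamo backend directly; a minimal sketch:

```python
from accelerate import Accelerator

# Equivalent to exporting ACCELERATE_DYNAMO_BACKEND=inductor before launch;
# models returned by accelerator.prepare() are then compiled with torch.compile.
accelerator = Accelerator(dynamo_backend="inductor")
```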
662
+ @dataclass
663
+ class DeepSpeedPlugin:
664
+ """
665
+ This plugin is used to integrate DeepSpeed.
666
+ """
667
+
668
+ hf_ds_config: Any = field(
669
+ default=None,
670
+ metadata={
671
+ "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`."
672
+ },
673
+ )
674
+ gradient_accumulation_steps: int = field(
675
+ default=None,
676
+ metadata={
677
+ "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
678
+ },
679
+ )
680
+ gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
681
+ zero_stage: int = field(
682
+ default=None,
683
+ metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"},
684
+ )
685
+ is_train_batch_min: str = field(
686
+ default=True,
687
+ metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"},
688
+ )
689
+ offload_optimizer_device: bool = field(
690
+ default=None,
691
+ metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."},
692
+ )
693
+ offload_param_device: bool = field(
694
+ default=None,
695
+ metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."},
696
+ )
697
+ offload_optimizer_nvme_path: str = field(
698
+ default=None,
699
+ metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
700
+ )
701
+ offload_param_nvme_path: str = field(
702
+ default=None,
703
+ metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
704
+ )
705
+ zero3_init_flag: bool = field(
706
+ default=None,
707
+ metadata={
708
+ "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models."
709
+ "Only applicable with ZeRO Stage-3."
710
+ },
711
+ )
712
+ zero3_save_16bit_model: bool = field(
713
+ default=None,
714
+ metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
715
+ )
716
+
717
+ def __post_init__(self):
718
+ from .deepspeed import HfDeepSpeedConfig
719
+
720
+ if self.gradient_accumulation_steps is None:
721
+ gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
722
+ self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
723
+
724
+ if self.gradient_clipping is None:
725
+ gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
726
+ if gradient_clipping != "none":
727
+ self.gradient_clipping = float(gradient_clipping)
728
+
729
+ if self.zero_stage is None:
730
+ self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
731
+
732
+ if self.offload_optimizer_device is None:
733
+ self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
734
+
735
+ if self.offload_param_device is None:
736
+ self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
737
+
738
+ if self.offload_optimizer_nvme_path is None:
739
+ self.offload_optimizer_nvme_path = os.environ.get(
740
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
741
+ )
742
+
743
+ if self.offload_param_nvme_path is None:
744
+ self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")
745
+
746
+ if self.zero3_save_16bit_model is None:
747
+ self.zero3_save_16bit_model = (
748
+ os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
749
+ )
750
+
751
+ if self.hf_ds_config is None:
752
+ self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
753
+ if (
754
+ isinstance(self.hf_ds_config, dict)
755
+ or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
756
+ or isinstance(self.hf_ds_config, HfDeepSpeedConfig)
757
+ ):
758
+ if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):
759
+ self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
760
+ if "gradient_accumulation_steps" not in self.hf_ds_config.config:
761
+ self.hf_ds_config.config["gradient_accumulation_steps"] = 1
762
+ if "zero_optimization" not in self.hf_ds_config.config:
763
+ raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
764
+
765
+ self._deepspeed_config_checks()
766
+ plugin_to_config_mapping = {
767
+ "gradient_accumulation_steps": "gradient_accumulation_steps",
768
+ "gradient_clipping": "gradient_clipping",
769
+ "zero_stage": "zero_optimization.stage",
770
+ "offload_optimizer_device": "zero_optimization.offload_optimizer.device",
771
+ "offload_param_device": "zero_optimization.offload_param.device",
772
+ "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path",
773
+ "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path",
774
+ "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
775
+ }
776
+ kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
777
+ for key in kwargs.keys():
778
+ self.fill_match(key, **kwargs, must_match=False)
779
+ self.hf_ds_config.set_stage_and_offload()
780
+
781
+ # filling the missing values in the class attributes from the DeepSpeed config
782
+ # when using the DeepSpeed config file.
783
+ for key, value in plugin_to_config_mapping.items():
784
+ config_value = self.hf_ds_config.get_value(value)
785
+ if config_value is not None and config_value != "auto":
786
+ setattr(self, key, config_value)
787
+ else:
788
+ config = {
789
+ "train_batch_size": "auto",
790
+ "train_micro_batch_size_per_gpu": "auto",
791
+ "gradient_accumulation_steps": self.gradient_accumulation_steps,
792
+ "zero_optimization": {
793
+ "stage": self.zero_stage,
794
+ "offload_optimizer": {
795
+ "device": self.offload_optimizer_device,
796
+ "nvme_path": self.offload_optimizer_nvme_path
797
+ if self.offload_optimizer_device == "nvme"
798
+ else None,
799
+ },
800
+ "offload_param": {
801
+ "device": self.offload_param_device,
802
+ "nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None,
803
+ },
804
+ "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model,
805
+ },
806
+ }
807
+ if self.gradient_clipping:
808
+ config["gradient_clipping"] = self.gradient_clipping
809
+ self.hf_ds_config = HfDeepSpeedConfig(config)
810
+
811
+ self.deepspeed_config = self.hf_ds_config.config
812
+ self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
813
+ if self.zero3_init_flag is None:
814
+ self.zero3_init_flag = (
815
+ str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
816
+ )
817
+ if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
818
+ warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
819
+ self.zero3_init_flag = False
820
+
821
+ def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
822
+ mismatches = [] if mismatches is None else mismatches
823
+ config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
824
+ if config is None:
825
+ return
826
+
827
+ if config.get(ds_key) == "auto":
828
+ if ds_key_long in kwargs:
829
+ config[ds_key] = kwargs[ds_key_long]
830
+ return
831
+ else:
832
+ raise ValueError(
833
+ f"`{ds_key_long}` not found in kwargs. "
834
+ f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or "
835
+ "pass it in kwargs."
836
+ )
837
+
838
+ if not must_match:
839
+ return
840
+
841
+ ds_val = config.get(ds_key)
842
+ if ds_val is not None and ds_key_long in kwargs:
843
+ if ds_val != kwargs[ds_key_long]:
844
+ mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
845
+
846
+ def is_auto(self, ds_key_long):
847
+ val = self.hf_ds_config.get_value(ds_key_long)
848
+ if val is None:
849
+ return False
850
+ else:
851
+ return val == "auto"
852
+
853
+ def get_value(self, ds_key_long, default=None):
854
+ return self.hf_ds_config.get_value(ds_key_long, default)
855
+
856
+ def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
857
+ """Process the DeepSpeed config with the values from the kwargs."""
858
+ mismatches = [] if mismatches is None else mismatches
859
+ if config is None:
860
+ config = self.deepspeed_config
861
+ for key, value in config.items():
862
+ if isinstance(value, dict):
863
+ self.deepspeed_config_process(
864
+ prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs
865
+ )
866
+ else:
867
+ self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)
868
+ if len(mismatches) > 0 and prefix == "":
869
+ mismatches_msg = "\n".join(mismatches)
870
+ raise ValueError(
871
+ "Please correct the following DeepSpeed config values that mismatch kwargs "
872
+ f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
873
+ )
874
+
875
+ def set_mixed_precision(self, mixed_precision):
876
+ ds_config = self.deepspeed_config
877
+ kwargs = {
878
+ "fp16.enabled": mixed_precision == "fp16",
879
+ "bf16.enabled": mixed_precision == "bf16",
880
+ }
881
+ if mixed_precision == "fp16":
882
+ if "fp16" not in ds_config:
883
+ ds_config["fp16"] = {"enabled": True, "auto_cast": True}
884
+ elif mixed_precision == "bf16":
885
+ if "bf16" not in ds_config:
886
+ ds_config["bf16"] = {"enabled": True}
887
+
888
+ if mixed_precision != "no":
889
+ diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
890
+ if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
891
+ raise ValueError(
892
+ f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file."
893
+ )
894
+ for dtype in ["fp16", "bf16"]:
895
+ if dtype not in ds_config:
896
+ ds_config[dtype] = {"enabled": False}
897
+ self.fill_match("fp16.enabled", must_match=False, **kwargs)
898
+ self.fill_match("bf16.enabled", must_match=False, **kwargs)
899
+
900
+ def set_deepspeed_weakref(self):
901
+ from .imports import is_transformers_available
902
+
903
+ if self.zero3_init_flag:
904
+ if not is_transformers_available():
905
+ raise Exception(
906
+ "When `zero3_init_flag` is set, it requires Transformers to be installed. "
907
+ "Please run `pip install transformers`."
908
+ )
909
+ ds_config = copy.deepcopy(self.deepspeed_config)
910
+ if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto":
911
+ ds_config["gradient_accumulation_steps"] = 1
912
+ if (
913
+ "train_micro_batch_size_per_gpu" not in ds_config
914
+ or ds_config["train_micro_batch_size_per_gpu"] == "auto"
915
+ ):
916
+ ds_config["train_micro_batch_size_per_gpu"] = 1
917
+ if ds_config.get("train_batch_size", None) == "auto":
918
+ del ds_config["train_batch_size"]
919
+
920
+ if compare_versions("transformers", "<", "4.33"):
921
+ from transformers.deepspeed import HfDeepSpeedConfig
922
+ else:
923
+ from transformers.integrations import HfDeepSpeedConfig
924
+
925
+ self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
926
+
927
+ def is_zero3_init_enabled(self):
928
+ return self.zero3_init_flag
929
+
930
+ @contextmanager
931
+ def zero3_init_context_manager(self, enable=False):
932
+ old = self.zero3_init_flag
933
+ if old == enable:
934
+ yield
935
+ else:
936
+ self.zero3_init_flag = enable
937
+ self.dschf = None
938
+ self.set_deepspeed_weakref()
939
+ yield
940
+ self.zero3_init_flag = old
941
+ self.dschf = None
942
+ self.set_deepspeed_weakref()
943
+
944
+ def _deepspeed_config_checks(self):
945
+ env_variable_names_to_ignore = [
946
+ "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
947
+ "ACCELERATE_GRADIENT_CLIPPING",
948
+ "ACCELERATE_DEEPSPEED_ZERO_STAGE",
949
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE",
950
+ "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE",
951
+ "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH",
952
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH",
953
+ "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL",
954
+ "ACCELERATE_MIXED_PRECISION",
955
+ ]
956
+ env_variable_names_to_ignore = [
957
+ name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
958
+ ]
959
+
960
+ deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
961
+
962
+ if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
963
+ raise ValueError(
964
+ f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
965
+ "Please specify them appropriately in the DeepSpeed config file.\n"
966
+ "If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n"
967
+ "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
968
+ "It will only ask for the necessary config variables when using `deepspeed_config_file`."
969
+ )
970
+
971
+
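A minimal usage sketch, assuming `deepspeed` is installed and the script is started with `accelerate launch`; the plugin can be built from keyword arguments as below, or from a complete DeepSpeed config file via `hf_ds_config`:

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# ZeRO stage 2 with optimizer state offloaded to CPU; fields left as None are
# filled from the ACCELERATE_DEEPSPEED_* environment variables in __post_init__.
deepspeed_plugin = DeepSpeedPlugin(
    zero_stage=2,
    gradient_accumulation_steps=2,
    offload_optimizer_device="cpu",
)
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
```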
972
+ @dataclass
973
+ class FullyShardedDataParallelPlugin:
974
+ """
975
+ This plugin is used to enable fully sharded data parallelism.
976
+ """
977
+
978
+ sharding_strategy: "typing.Any" = field(
979
+ default=None,
980
+ metadata={
981
+ "help": "FSDP Sharding Strategy of type `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`"
982
+ },
983
+ )
984
+ backward_prefetch: "typing.Any" = field(
985
+ default=None,
986
+ metadata={
987
+ "help": "FSDP Backward Prefetch of type `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`"
988
+ },
989
+ )
990
+ mixed_precision_policy: "typing.Any" = field(
991
+ default=None,
992
+ metadata={
993
+ "help": "A config to enable mixed precision training with FullyShardedDataParallel. "
994
+ "The 3 flags that are set are `param_dtype`, `reduce_dtype`, `buffer_dtype`. "
995
+ "Each flag expects `torch.dtype` as the value. "
996
+ "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision`."
997
+ },
998
+ )
999
+ auto_wrap_policy: Optional[Callable] = field(
1000
+ default=None,
1001
+ metadata={"help": "A callable specifying a policy to recursively wrap layers with FSDP"},
1002
+ )
1003
+ cpu_offload: "typing.Any" = field(
1004
+ default=None,
1005
+ metadata={
1006
+ "help": "Decides Whether to offload parameters and gradients to CPU. "
1007
+ "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`."
1008
+ },
1009
+ )
1010
+ ignored_modules: Optional[Iterable[torch.nn.Module]] = field(
1011
+ default=None,
1012
+ metadata={"help": "A list of modules to ignore for FSDP."},
1013
+ )
1014
+ state_dict_type: "typing.Any" = field(
1015
+ default=None,
1016
+ metadata={
1017
+ "help": "FSDP State Dict Type of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictType`"
1018
+ },
1019
+ )
1020
+ state_dict_config: "typing.Any" = field(
1021
+ default=None,
1022
+ metadata={
1023
+ "help": "FSDP State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictConfig`"
1024
+ },
1025
+ )
1026
+ optim_state_dict_config: "typing.Any" = field(
1027
+ default=None,
1028
+ metadata={
1029
+ "help": "FSDP Optimizer State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.OptimStateDictConfig`"
1030
+ },
1031
+ )
1032
+ limit_all_gathers: bool = field(
1033
+ default=True,
1034
+ metadata={
1035
+ "help": "If False, then FSDP allows the CPU thread to schedule all-gathers "
1036
+ "without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent "
1037
+ "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. "
1038
+ "Enabling this can help lower the number of CUDA malloc retries."
1039
+ },
1040
+ )
1041
+ use_orig_params: bool = field(
1042
+ default=True,
1043
+ metadata={
1044
+ "help": "If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. "
1045
+ "Useful in cases such as parameter-efficient fine-tuning. "
1046
+ "Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). "
1047
+ "This also enables multiple optimizer param groups. This should be `True` when creating an optimizer object before preparing/wrapping the model with FSDP."
1048
+ },
1049
+ )
1050
+ param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field(
1051
+ default=None,
1052
+ metadata={
1053
+ "help": "A Callable[torch.nn.Module] -> None that specifies how modules "
1054
+ "that are currently on the meta device should be initialized onto an actual device."
1055
+ },
1056
+ )
1057
+ sync_module_states: bool = field(
1058
+ default=True,
1059
+ metadata={
1060
+ "help": "If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0 "
1061
+ "to ensure they are the same across all ranks after initialization"
1062
+ },
1063
+ )
1064
+ forward_prefetch: bool = field(
1065
+ default=False,
1066
+ metadata={
1067
+ "help": "If True, then FSDP explicitly prefetches the next upcoming "
1068
+ "all-gather while executing in the forward pass. only use with Static graphs."
1069
+ },
1070
+ )
1071
+ activation_checkpointing: bool = field(
1072
+ default=False,
1073
+ metadata={
1074
+ "help": "If True, activation checkpointing is a technique to reduce memory usage by clearing activations of "
1075
+ "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time "
1076
+ "for reduced memory usage."
1077
+ },
1078
+ )
1079
+
1080
+ def __post_init__(self):
1081
+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
1082
+
1083
+ prefix = "FSDP_"
1084
+ if self.sharding_strategy is None:
1085
+ sharding_strategy = os.environ.get(prefix + "SHARDING_STRATEGY", "FULL_SHARD")
1086
+ sharding_strategy = (
1087
+ FSDP_SHARDING_STRATEGY.index(sharding_strategy) + 1
1088
+ if not sharding_strategy.isdigit()
1089
+ else int(sharding_strategy)
1090
+ )
1091
+ self.sharding_strategy = ShardingStrategy(sharding_strategy)
1092
+
1093
+ if self.cpu_offload is None:
1094
+ if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
1095
+ self.cpu_offload = CPUOffload(offload_params=True)
1096
+ else:
1097
+ self.cpu_offload = CPUOffload(offload_params=False)
1098
+
1099
+ if self.backward_prefetch is None:
1100
+ prefetch_policy = os.environ.get(prefix + "BACKWARD_PREFETCH", "NO_PREFETCH")
1101
+ if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:
1102
+ self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)
1103
+
1104
+ if self.state_dict_type is None:
1105
+ state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
1106
+ self.set_state_dict_type(state_dict_type_policy)
1107
+ self.use_orig_params = str_to_bool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
1108
+ self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
1109
+ self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
1110
+ self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
1111
+
1112
+ if self.sync_module_states:
1113
+ if is_npu_available():
1114
+ device = torch.npu.current_device()
1115
+ elif is_cuda_available():
1116
+ device = torch.cuda.current_device()
1117
+ elif is_xpu_available():
1118
+ device = torch.xpu.current_device()
1119
+ else:
1120
+ raise RuntimeError(
1121
+ "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
1122
+ )
1123
+ self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
1124
+
1125
+ @staticmethod
1126
+ def get_module_class_from_name(module, name):
1127
+ """
1128
+ Gets a class from a module by its name.
1129
+
1130
+ Args:
1131
+ module (`torch.nn.Module`): The module to get the class from.
1132
+ name (`str`): The name of the class.
1133
+ """
1134
+ modules_children = list(module.children())
1135
+ if module.__class__.__name__ == name:
1136
+ return module.__class__
1137
+ elif len(modules_children) == 0:
1138
+ return
1139
+ else:
1140
+ for child_module in modules_children:
1141
+ module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)
1142
+ if module_class is not None:
1143
+ return module_class
1144
+
1145
+ def set_auto_wrap_policy(self, model):
1146
+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
1147
+
1148
+ default_transformer_cls_names_to_wrap = (
1149
+ ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
1150
+ )
1151
+ if self.auto_wrap_policy is None:
1152
+ auto_wrap_policy = os.environ.get("FSDP_AUTO_WRAP_POLICY", "NO_WRAP")
1153
+ if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:
1154
+ transformer_cls_names_to_wrap = os.environ.get(
1155
+ "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap
1156
+ ).split(",")
1157
+ transformer_cls_to_wrap = set()
1158
+ for layer_class in transformer_cls_names_to_wrap:
1159
+ transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class)
1160
+ if transformer_cls is None:
1161
+ raise Exception("Could not find the transformer layer class to wrap in the model.")
1162
+ else:
1163
+ transformer_cls_to_wrap.add(transformer_cls)
1164
+
1165
+ self.auto_wrap_policy = functools.partial(
1166
+ transformer_auto_wrap_policy,
1167
+ # Transformer layer class to wrap
1168
+ transformer_layer_cls=transformer_cls_to_wrap,
1169
+ )
1170
+ elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]:
1171
+ min_num_params = int(os.environ.get("FSDP_MIN_NUM_PARAMS", 0))
1172
+ if min_num_params > 0:
1173
+ self.auto_wrap_policy = functools.partial(
1174
+ size_based_auto_wrap_policy, min_num_params=min_num_params
1175
+ )
1176
+
1177
+ def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False):
1178
+ if isinstance(mixed_precision, str):
1179
+ if mixed_precision == "fp16":
1180
+ dtype = torch.float16
1181
+ elif mixed_precision == "bf16":
1182
+ dtype = torch.bfloat16
1183
+ elif mixed_precision == "fp32":
1184
+ dtype = torch.float32
1185
+ else:
1186
+ raise ValueError(f"Unknown mixed precision value: {mixed_precision}")
1187
+ else:
1188
+ dtype = mixed_precision
1189
+
1190
+ buffer_dtype = torch.float32 if buffer_autocast else dtype
1191
+ from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
1192
+
1193
+ if self.mixed_precision_policy is None or override:
1194
+ self.mixed_precision_policy = MixedPrecision(
1195
+ param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_dtype
1196
+ )
1197
+
1198
+ def set_state_dict_type(self, state_dict_type_policy):
1199
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
1200
+ FullOptimStateDictConfig,
1201
+ FullStateDictConfig,
1202
+ StateDictType,
1203
+ )
1204
+
1205
+ self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
1206
+
1207
+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:
1208
+ if self.state_dict_config is None:
1209
+ self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
1210
+ if self.optim_state_dict_config is None:
1211
+ self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
1212
+
1213
+
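A usage sketch for the FSDP plugin; a multi-GPU launch via `accelerate launch` is assumed (with `sync_module_states=True` the `__post_init__` above needs a CUDA/NPU/XPU device), and most fields can alternatively be driven by the `FSDP_*` environment variables:

```python
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

from accelerate import Accelerator
from accelerate.utils import FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(
    cpu_offload=CPUOffload(offload_params=False),
    use_orig_params=True,  # allows building the optimizer before prepare()
)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
# model, optimizer and dataloaders are then wrapped as usual via accelerator.prepare(...)
```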
1214
+ @dataclass
1215
+ class MegatronLMPlugin:
1216
+ """
1217
+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective
1218
+ activation recomputation and optimized fused kernels.
1219
+ """
1220
+
1221
+ tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."})
1222
+ pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."})
1223
+ num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."})
1224
+ gradient_clipping: float = field(
1225
+ default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"}
1226
+ )
1227
+ sequence_parallelism: bool = field(
1228
+ default=None,
1229
+ metadata={"help": "enable sequence parallelism"},
1230
+ )
1231
+ recompute_activations: bool = field(
1232
+ default=None,
1233
+ metadata={"help": "enable selective activation recomputation"},
1234
+ )
1235
+ use_distributed_optimizer: bool = field(
1236
+ default=None,
1237
+ metadata={"help": "enable distributed optimizer"},
1238
+ )
1239
+ pipeline_model_parallel_split_rank: int = field(
1240
+ default=None, metadata={"help": "Rank where encoder and decoder should be split."}
1241
+ )
1242
+ num_layers_per_virtual_pipeline_stage: int = field(
1243
+ default=None, metadata={"help": "Number of layers per virtual pipeline stage."}
1244
+ )
1245
+ is_train_batch_min: str = field(
1246
+ default=True,
1247
+ metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"},
1248
+ )
1249
+ train_iters: int = field(
1250
+ default=None,
1251
+ metadata={
1252
+ "help": "Total number of iterations to train over all training runs. "
1253
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
1254
+ },
1255
+ )
1256
+ train_samples: int = field(
1257
+ default=None,
1258
+ metadata={
1259
+ "help": "Total number of samples to train over all training runs. "
1260
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
1261
+ },
1262
+ )
1263
+ weight_decay_incr_style: str = field(
1264
+ default="constant",
1265
+ metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '},
1266
+ )
1267
+ start_weight_decay: float = field(
1268
+ default=None,
1269
+ metadata={"help": "Initial weight decay coefficient for L2 regularization."},
1270
+ )
1271
+ end_weight_decay: float = field(
1272
+ default=None,
1273
+ metadata={"help": "End of run weight decay coefficient for L2 regularization."},
1274
+ )
1275
+ lr_decay_style: str = field(
1276
+ default="linear",
1277
+ metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."},
1278
+ )
1279
+ lr_decay_iters: int = field(
1280
+ default=None,
1281
+ metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."},
1282
+ )
1283
+ lr_decay_samples: int = field(
1284
+ default=None,
1285
+ metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."},
1286
+ )
1287
+ lr_warmup_iters: int = field(
1288
+ default=None,
1289
+ metadata={"help": "number of iterations to linearly warmup learning rate over."},
1290
+ )
1291
+ lr_warmup_samples: int = field(
1292
+ default=None,
1293
+ metadata={"help": "number of samples to linearly warmup learning rate over."},
1294
+ )
1295
+ lr_warmup_fraction: float = field(
1296
+ default=None,
1297
+ metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."},
1298
+ )
1299
+ min_lr: float = field(
1300
+ default=0,
1301
+ metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."},
1302
+ )
1303
+ consumed_samples: List[int] = field(
1304
+ default=None,
1305
+ metadata={
1306
+ "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call."
1307
+ },
1308
+ )
1309
+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."})
1310
+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."})
1311
+ lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."})
1312
+ megatron_dataset_flag: bool = field(
1313
+ default=False,
1314
+ metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."},
1315
+ )
1316
+ seq_length: int = field(
1317
+ default=None,
1318
+ metadata={"help": "Maximum sequence length to process."},
1319
+ )
1320
+ encoder_seq_length: int = field(
1321
+ default=None,
1322
+ metadata={"help": "Maximum sequence length to process for the encoder."},
1323
+ )
1324
+ decoder_seq_length: int = field(
1325
+ default=None,
1326
+ metadata={"help": "Maximum sequence length to process for the decoder."},
1327
+ )
1328
+ tensorboard_dir: str = field(
1329
+ default=None,
1330
+ metadata={"help": "Path to save tensorboard logs."},
1331
+ )
1332
+ set_all_logging_options: bool = field(
1333
+ default=False,
1334
+ metadata={"help": "Whether to set all logging options."},
1335
+ )
1336
+ eval_iters: int = field(
1337
+ default=100, metadata={"help": "Number of iterations to run for evaluation validation/test for."}
1338
+ )
1339
+ eval_interval: int = field(
1340
+ default=1000, metadata={"help": "Interval between running evaluation on validation set."}
1341
+ )
1342
+ return_logits: bool = field(
1343
+ default=False,
1344
+ metadata={"help": "Whether to return logits from the model."},
1345
+ )
1346
+
1347
+ # custom train step args
1348
+ custom_train_step_class: Optional[Any] = field(
1349
+ default=None,
1350
+ metadata={"help": "Custom train step class."},
1351
+ )
1352
+ custom_train_step_kwargs: Optional[Dict[str, Any]] = field(
1353
+ default=None,
1354
+ metadata={"help": "Custom train step kwargs."},
1355
+ )
1356
+
1357
+ # custom model args
1358
+ custom_model_provider_function: Optional[Callable] = field(
1359
+ default=None,
1360
+ metadata={"help": "Custom model provider function."},
1361
+ )
1362
+ custom_prepare_model_function: Optional[Callable] = field(
1363
+ default=None,
1364
+ metadata={"help": "Custom prepare model function."},
1365
+ )
1366
+
1367
+ # remaining args such as enabling Alibi/ROPE positional embeddings,
1368
+ # wandb logging, Multi-Query Attention, etc.
1369
+ other_megatron_args: Optional[Dict[str, Any]] = field(
1370
+ default=None,
1371
+ metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
1372
+ )
1373
+
1374
+ def __post_init__(self):
1375
+ prefix = "MEGATRON_LM_"
1376
+ if self.tp_degree is None:
1377
+ self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1))
1378
+ if self.pp_degree is None:
1379
+ self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1))
1380
+ if self.num_micro_batches is None:
1381
+ self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1))
1382
+ if self.gradient_clipping is None:
1383
+ self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
1384
+ if self.recompute_activations is None:
1385
+ self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1
1386
+ if self.use_distributed_optimizer is None:
1387
+ self.use_distributed_optimizer = (
1388
+ str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
1389
+ )
1390
+ if self.sequence_parallelism is None:
1391
+ self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
1392
+
1393
+ if self.pp_degree > 1 or self.use_distributed_optimizer:
1394
+ self.DDP_impl = "local"
1395
+ else:
1396
+ self.DDP_impl = "torch"
1397
+
1398
+ if self.consumed_samples is not None:
1399
+ if len(self.consumed_samples) == 1:
1400
+ self.consumed_samples.extend([0, 0])
1401
+ elif len(self.consumed_samples) == 2:
1402
+ self.consumed_samples.append(0)
1403
+
1404
+ self.megatron_lm_default_args = {
1405
+ "tensor_model_parallel_size": self.tp_degree,
1406
+ "pipeline_model_parallel_size": self.pp_degree,
1407
+ "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank,
1408
+ "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage,
1409
+ "DDP_impl": self.DDP_impl,
1410
+ "use_distributed_optimizer": self.use_distributed_optimizer,
1411
+ "sequence_parallel": self.sequence_parallelism,
1412
+ "clip_grad": self.gradient_clipping,
1413
+ "num_micro_batches": self.num_micro_batches,
1414
+ "consumed_samples": self.consumed_samples,
1415
+ "no_wd_decay_cond": self.no_wd_decay_cond,
1416
+ "scale_lr_cond": self.scale_lr_cond,
1417
+ "lr_mult": self.lr_mult,
1418
+ "megatron_dataset_flag": self.megatron_dataset_flag,
1419
+ "eval_iters": self.eval_iters,
1420
+ "eval_interval": self.eval_interval,
1421
+ }
1422
+ if self.recompute_activations:
1423
+ self.megatron_lm_default_args["recompute_granularity"] = "selective"
1424
+ if self.tensorboard_dir is not None:
1425
+ self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir
1426
+ if self.set_all_logging_options:
1427
+ self.set_tensorboard_logging_options()
1428
+ if self.other_megatron_args is not None:
1429
+ self.megatron_lm_default_args.update(self.other_megatron_args)
1430
+
1431
+ def set_network_size_args(self, model, batch_data=None):
1432
+ # Check if the model is either BERT, GPT or T5 else raise error
1433
+ # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
1434
+ if "megatron-bert" in model.config.model_type.lower():
1435
+ model_type_name = "bert"
1436
+ num_layers = model.config.num_hidden_layers
1437
+ hidden_size = model.config.hidden_size
1438
+ num_attention_heads = model.config.num_attention_heads
1439
+ max_position_embeddings = model.config.max_position_embeddings
1440
+ num_labels = model.config.num_labels
1441
+ orig_vocab_size = model.config.vocab_size
1442
+ if "maskedlm" in model.__class__.__name__.lower():
1443
+ pretraining_flag = True
1444
+ if self.seq_length is not None:
1445
+ if self.encoder_seq_length is not None:
1446
+ warnings.warn("Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.")
1447
+ self.seq_length = self.encoder_seq_length
1448
+ elif self.encoder_seq_length is not None:
1449
+ self.seq_length = self.encoder_seq_length
1450
+ elif batch_data is not None:
1451
+ self.seq_length = batch_data["input_ids"].shape[1]
1452
+ else:
1453
+ self.seq_length = max_position_embeddings
1454
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
1455
+ elif "gpt2" in model.config.model_type.lower():
1456
+ model_type_name = "gpt"
1457
+ num_layers = model.config.n_layer
1458
+ hidden_size = model.config.n_embd
1459
+ num_attention_heads = model.config.n_head
1460
+ max_position_embeddings = model.config.n_positions
1461
+ orig_vocab_size = model.config.vocab_size
1462
+ pretraining_flag = True
1463
+ if self.seq_length is not None:
1464
+ if self.decoder_seq_length is not None:
1465
+ warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.")
1466
+ self.seq_length = self.decoder_seq_length
1467
+ elif self.decoder_seq_length is not None:
1468
+ self.seq_length = self.decoder_seq_length
1469
+ elif batch_data is not None:
1470
+ self.seq_length = batch_data["input_ids"].shape[1]
1471
+ else:
1472
+ self.seq_length = max_position_embeddings
1473
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
1474
+ self.megatron_lm_default_args["return_logits"] = self.return_logits
1475
+ self.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer"
1476
+ elif "t5" in model.config.model_type.lower():
1477
+ model_type_name = "t5"
1478
+ num_layers = model.config.num_layers
1479
+ hidden_size = model.config.d_model
1480
+ num_attention_heads = model.config.num_heads
1481
+ max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024
1482
+ orig_vocab_size = model.config.vocab_size
1483
+ pretraining_flag = True
1484
+ if self.encoder_seq_length is None:
1485
+ if batch_data is not None:
1486
+ self.encoder_seq_length = batch_data["input_ids"].shape[1]
1487
+ else:
1488
+ self.encoder_seq_length = max_position_embeddings
1489
+ if self.decoder_seq_length is None:
1490
+ if batch_data is not None:
1491
+ self.decoder_seq_length = batch_data["labels"].shape[1]
1492
+ else:
1493
+ self.decoder_seq_length = max_position_embeddings
1494
+
1495
+ self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length
1496
+ self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length
1497
+ else:
1498
+ raise ValueError(
1499
+ "🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. "
1500
+ "Please check the model you are using is one of those."
1501
+ )
1502
+
1503
+ self.megatron_lm_default_args["model_type_name"] = model_type_name
1504
+ self.megatron_lm_default_args["num_layers"] = num_layers
1505
+ self.megatron_lm_default_args["hidden_size"] = hidden_size
1506
+ self.megatron_lm_default_args["num_attention_heads"] = num_attention_heads
1507
+ self.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings
1508
+ self.megatron_lm_default_args["pretraining_flag"] = pretraining_flag
1509
+ self.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size
1510
+ self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
1511
+ if model_type_name == "bert":
1512
+ self.megatron_lm_default_args["num_labels"] = num_labels
1513
+
1514
+ def set_mixed_precision(self, mixed_precision):
1515
+ if mixed_precision == "fp16":
1516
+ self.megatron_lm_default_args["fp16"] = True
1517
+ elif mixed_precision == "bf16":
1518
+ self.megatron_lm_default_args["bf16"] = True
1519
+ self.DDP_impl = "local"
1520
+ self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
1521
+
1522
+ def set_training_args(self, micro_batch_size, dp_degree):
1523
+ self.data_parallel_size = dp_degree
1524
+ self.micro_batch_size = micro_batch_size
1525
+ self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches
1526
+ self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
1527
+ self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
1528
+ self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
1529
+
1530
+ def set_optimizer_type(self, optimizer):
1531
+ optimizer_name = optimizer.__class__.__name__.lower()
1532
+ if "adam" in optimizer_name:
1533
+ self.megatron_lm_default_args["optimizer"] = "adam"
1534
+ self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0]
1535
+ self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1]
1536
+ self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"]
1537
+ elif "sgd" in optimizer_name:
1538
+ self.megatron_lm_default_args["optimizer"] = "sgd"
1539
+ self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"]
1540
+ else:
1541
+ raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
1542
+
1543
+ self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
1544
+ self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
1545
+
1546
+ def set_scheduler_args(self, scheduler):
1547
+ if self.train_iters is None:
1548
+ self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
1549
+ if self.train_samples is not None:
1550
+ self.train_samples = None
1551
+ warnings.warn(
1552
+ "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training."
1553
+ )
1554
+ if self.lr_warmup_iters is None:
1555
+ self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"]
1556
+ if self.lr_warmup_samples is not None:
1557
+ warnings.warn(
1558
+ "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training."
1559
+ )
1560
+ self.lr_warmup_samples = 0
1561
+
1562
+ self.megatron_lm_default_args["train_iters"] = self.train_iters
1563
+ self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters
1564
+ self.megatron_lm_default_args["train_samples"] = self.train_samples
1565
+ self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples
1566
+ self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters
1567
+ self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples
1568
+ self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction
1569
+ self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style
1570
+ self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style
1571
+ self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
1572
+ self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
1573
+ self.megatron_lm_default_args["min_lr"] = self.min_lr
1574
+
1575
+ def set_tensorboard_logging_options(self):
1576
+ from megatron.arguments import _add_logging_args
1577
+
1578
+ parser = argparse.ArgumentParser()
1579
+ parser = _add_logging_args(parser)
1580
+ logging_args = parser.parse_known_args()
1581
+ self.dataset_args = vars(logging_args[0])
1582
+ for key, value in self.dataset_args.items():
1583
+ if key.startswith("log_"):
1584
+ self.megatron_lm_default_args[key] = True
1585
+ elif key.startswith("no_log_"):
1586
+ self.megatron_lm_default_args[key.replace("no_", "")] = True
1587
+
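+ # A minimal usage sketch (illustrative only), assuming the enclosing dataclass above is named
+ # `MegatronLMPlugin`; the field values and the extra Megatron-LM argument are placeholders.
+ def _example_megatron_lm_plugin():  # pragma: no cover - illustrative only
+     plugin = MegatronLMPlugin(
+         num_micro_batches=4,
+         gradient_clipping=1.0,
+         sequence_parallelism=True,
+         other_megatron_args={"use_flash_attn": True},  # placeholder passthrough argument
+     )
+     # `__post_init__` resolves any unset fields from `MEGATRON_LM_*` environment variables
+     # and collects everything into `megatron_lm_default_args`.
+     return plugin.megatron_lm_default_args["num_micro_batches"]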
1588
+
1589
+ @dataclass
1590
+ class BnbQuantizationConfig:
1591
+ """
1592
+ A plugin to enable BitsAndBytes 4bit and 8bit quantization
1593
+ """
1594
+
1595
+ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
1596
+
1597
+ llm_int8_threshold: float = field(
1598
+ default=6.0, metadata={"help": "value of the outliner threshold. only relevant when load_in_8bit=True"}
1599
+ )
1600
+
1601
+ load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
1602
+
1603
+ bnb_4bit_quant_type: str = field(
1604
+ default="fp4",
1605
+ metadata={
1606
+ "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','np4'}."
1607
+ },
1608
+ )
1609
+
1610
+ bnb_4bit_use_double_quant: bool = field(
1611
+ default=False,
1612
+ metadata={
1613
+ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
1614
+ },
1615
+ )
1616
+
1617
+ bnb_4bit_compute_dtype: str = field(
1618
+ default="fp16",
1619
+ metadata={
1620
+ "help": "This sets the computational type which might be different than the input time. For example, inputs might be "
1621
+ "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
1622
+ },
1623
+ )
1624
+
1625
+ torch_dtype: torch.dtype = field(
1626
+ default=None,
1627
+ metadata={
1628
+ "help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value"
1629
+ "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model "
1630
+ },
1631
+ )
1632
+
1633
+ skip_modules: List[str] = field(
1634
+ default=None,
1635
+ metadata={
1636
+ "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
1637
+ },
1638
+ )
1639
+
1640
+ keep_in_fp32_modules: List[str] = field(
1641
+ default=None,
1642
+ metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
1643
+ )
1644
+
1645
+ def __post_init__(self):
1646
+ """
1647
+ Safety checker to make sure the arguments are correct; also replaces some `None` arguments with their default values.
1648
+ """
1649
+ if not isinstance(self.load_in_8bit, bool):
1650
+ raise ValueError("load_in_8bit must be a boolean")
1651
+
1652
+ if not isinstance(self.load_in_4bit, bool):
1653
+ raise ValueError("load_in_4bit must be a boolean")
1654
+
1655
+ if self.load_in_4bit and self.load_in_8bit:
1656
+ raise ValueError("load_in_4bit and load_in_8 can't be both True")
1657
+
1658
+ if not self.load_in_4bit and not self.load_in_8bit:
1659
+ raise ValueError("load_in_4bit and load_in_8 can't be both False")
1660
+
1661
+ if not isinstance(self.llm_int8_threshold, (int, float)):
1662
+ raise ValueError("llm_int8_threshold must be a float or an int")
1663
+
1664
+ if not isinstance(self.bnb_4bit_quant_type, str):
1665
+ raise ValueError("bnb_4bit_quant_type must be a string")
1666
+ elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]:
1667
+ raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}")
1668
+
1669
+ if not isinstance(self.bnb_4bit_use_double_quant, bool):
1670
+ raise ValueError("bnb_4bit_use_double_quant must be a boolean")
1671
+
1672
+ if isinstance(self.bnb_4bit_compute_dtype, str):
1673
+ if self.bnb_4bit_compute_dtype == "fp32":
1674
+ self.bnb_4bit_compute_dtype = torch.float32
1675
+ elif self.bnb_4bit_compute_dtype == "fp16":
1676
+ self.bnb_4bit_compute_dtype = torch.float16
1677
+ elif self.bnb_4bit_compute_dtype == "bf16":
1678
+ self.bnb_4bit_compute_dtype = torch.bfloat16
1679
+ else:
1680
+ raise ValueError(
1681
+ f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}"
1682
+ )
1683
+ elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
1684
+ raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
1685
+
1686
+ if self.skip_modules is not None and not isinstance(self.skip_modules, list):
1687
+ raise ValueError("skip_modules must be a list of strings")
1688
+
1689
+ if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list):
1690
+ raise ValueError("keep_in_fp_32_modules must be a list of strings")
1691
+
1692
+ if self.load_in_4bit:
1693
+ self.target_dtype = CustomDtype.INT4
1694
+
1695
+ if self.load_in_8bit:
1696
+ self.target_dtype = torch.int8
1697
+
1698
+ if self.load_in_4bit and self.llm_int8_threshold != 6.0:
1699
+ warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit")
1700
+
1701
+ if isinstance(self.torch_dtype, str):
1702
+ if self.torch_dtype == "fp32":
1703
+ self.torch_dtype = torch.float32
1704
+ elif self.torch_dtype == "fp16":
1705
+ self.torch_dtype = torch.float16
1706
+ elif self.torch_dtype == "bf16":
1707
+ self.torch_dtype = torch.bfloat16
1708
+ else:
1709
+ raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}")
1710
+ if self.load_in_8bit and self.torch_dtype is None:
1711
+ self.torch_dtype = torch.float16
1712
+
1713
+ if self.load_in_4bit and self.torch_dtype is None:
1714
+ self.torch_dtype = self.bnb_4bit_compute_dtype
1715
+
1716
+ if not isinstance(self.torch_dtype, torch.dtype):
1717
+ raise ValueError("torch_dtype must be a torch.dtype")
venv/lib/python3.10/site-packages/accelerate/utils/environment.py ADDED
@@ -0,0 +1,274 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import math
17
+ import os
18
+ import platform
19
+ import subprocess
20
+ import sys
21
+ from dataclasses import dataclass, field
22
+ from functools import lru_cache
23
+ from shutil import which
24
+ from typing import List, Optional
25
+
26
+ import torch
27
+ from packaging.version import parse
28
+
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
+ def convert_dict_to_env_variables(current_env: dict):
34
+ """
35
+ Verifies that all keys and values in `current_env` do not contain illegal keys or values, and returns a list of
36
+ strings as the result.
37
+
38
+ Example:
39
+ ```python
40
+ >>> from accelerate.utils.environment import convert_dict_to_env_variables
41
+
42
+ >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"}
43
+ >>> valid_env_items = convert_dict_to_env_variables(env)
44
+ >>> print(valid_env_items)
45
+ ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"]
46
+ ```
47
+ """
48
+ forbidden_chars = [";", "\n", "<", ">", " "]
49
+ valid_env_items = []
50
+ for key, value in current_env.items():
51
+ if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1:
52
+ valid_env_items.append(f"{key}={value}\n")
53
+ else:
54
+ logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.")
55
+ return valid_env_items
56
+
57
+
58
+ def str_to_bool(value) -> int:
59
+ """
60
+ Converts a string representation of truth to `True` (1) or `False` (0).
61
+
62
+ True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
63
+ """
64
+ value = value.lower()
65
+ if value in ("y", "yes", "t", "true", "on", "1"):
66
+ return 1
67
+ elif value in ("n", "no", "f", "false", "off", "0"):
68
+ return 0
69
+ else:
70
+ raise ValueError(f"invalid truth value {value}")
71
+
72
+
73
+ def get_int_from_env(env_keys, default):
74
+ """Returns the first positive env value found in the `env_keys` list or the default."""
75
+ for e in env_keys:
76
+ val = int(os.environ.get(e, -1))
77
+ if val >= 0:
78
+ return val
79
+ return default
80
+
81
+
82
+ def parse_flag_from_env(key, default=False):
83
+ """Returns truthy value for `key` from the env if available else the default."""
84
+ value = os.environ.get(key, str(default))
85
+ return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...
86
+
87
+
88
+ def parse_choice_from_env(key, default="no"):
89
+ value = os.environ.get(key, str(default))
90
+ return value
91
+
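+ # A minimal usage sketch (illustrative only) of how the env helpers above compose; the
+ # environment variable names used here are placeholders.
+ def _example_env_parsing():  # pragma: no cover - illustrative only
+     os.environ["MY_FLAG"] = "yes"
+     os.environ["MY_WORKERS"] = "8"
+     assert str_to_bool("off") == 0
+     assert parse_flag_from_env("MY_FLAG", default=False) is True
+     assert get_int_from_env(["MISSING_KEY", "MY_WORKERS"], default=1) == 8
+     assert parse_choice_from_env("MY_MODE", default="no") == "no"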
92
+
93
+ def are_libraries_initialized(*library_names: str) -> List[str]:
94
+ """
95
+ Checks if any of `library_names` are imported in the environment. Will return any names that are.
96
+ """
97
+ return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
98
+
99
+
100
+ def _nvidia_smi():
101
+ """
102
+ Returns the right nvidia-smi command based on the system.
103
+ """
104
+ if platform.system() == "Windows":
105
+ # If platform is Windows and nvidia-smi can't be found in path
106
+ # try from the system drive with the default installation path
107
+ command = which("nvidia-smi")
108
+ if command is None:
109
+ command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"]
110
+ else:
111
+ command = "nvidia-smi"
112
+ return command
113
+
114
+
115
+ def get_gpu_info():
116
+ """
117
+ Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.
118
+
119
+ Largely based on the `gputil` library.
120
+ """
121
+ # Returns as list of `n` GPUs and their names
122
+ output = subprocess.check_output(
123
+ [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True
124
+ )
125
+ output = output.strip()
126
+ gpus = output.split(os.linesep)
127
+ # Get names from output
128
+ gpu_count = len(gpus)
129
+ gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
130
+ return gpu_names, gpu_count
131
+
132
+
133
+ def get_driver_version():
134
+ """
135
+ Returns the driver version
136
+
137
+ In the case of multiple GPUs, will return the first.
138
+ """
139
+ output = subprocess.check_output(
140
+ [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True
141
+ )
142
+ output = output.strip()
143
+ return output.split(os.linesep)[0]
144
+
145
+
146
+ def check_cuda_p2p_ib_support():
147
+ """
148
+ Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after
149
+ the 3090.
150
+
151
+ Notably uses `nvidia-smi` instead of torch so as not to initialize CUDA.
152
+ """
153
+ try:
154
+ device_names, device_count = get_gpu_info()
155
+ # As new consumer GPUs get released, add them to `unsupported_devices`
156
+ unsupported_devices = {"RTX 40"}
157
+ if device_count > 1:
158
+ if any(
159
+ unsupported_device in device_name
160
+ for device_name in device_names
161
+ for unsupported_device in unsupported_devices
162
+ ):
163
+ # Check if they have the right driver version
164
+ acceptable_driver_version = "550.40.07"
165
+ current_driver_version = get_driver_version()
166
+ if parse(current_driver_version) < parse(acceptable_driver_version):
167
+ return False
168
+ return True
169
+ except Exception:
170
+ pass
171
+ return True
172
+
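+ # A minimal sketch (illustrative only) of the driver-version comparison used above, shown in
+ # isolation with placeholder version strings.
+ def _example_driver_version_check():  # pragma: no cover - illustrative only
+     acceptable = "550.40.07"
+     current = "545.23.08"  # placeholder; the real check calls `get_driver_version()`
+     # `packaging.version.parse` compares release components numerically, so 545.x < 550.x.
+     return parse(current) < parse(acceptable)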
173
+
174
+ def check_fp8_capability():
175
+ """
176
+ Checks if all the current GPUs available support FP8.
177
+
178
+ Notably must initialize `torch.cuda` to check.
179
+ """
180
+ cuda_device_capacity = torch.cuda.get_device_capability()
181
+ return cuda_device_capacity >= (8, 9)
182
+
183
+
184
+ @dataclass
185
+ class CPUInformation:
186
+ """
187
+ Stores information about the CPU in a distributed environment. It contains the following attributes:
188
+ - rank: The rank of the current process.
189
+ - world_size: The total number of processes in the world.
190
+ - local_rank: The rank of the current process on the local node.
191
+ - local_world_size: The total number of processes on the local node.
192
+ """
193
+
194
+ rank: int = field(default=0, metadata={"help": "The rank of the current process."})
195
+ world_size: int = field(default=1, metadata={"help": "The total number of processes in the world."})
196
+ local_rank: int = field(default=0, metadata={"help": "The rank of the current process on the local node."})
197
+ local_world_size: int = field(default=1, metadata={"help": "The total number of processes on the local node."})
198
+
199
+
200
+ def get_cpu_distributed_information() -> CPUInformation:
201
+ """
202
+ Returns various information about the environment in relation to CPU distributed training as a `CPUInformation`
203
+ dataclass.
204
+ """
205
+ information = {}
206
+ information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0)
207
+ information["world_size"] = get_int_from_env(
208
+ ["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1
209
+ )
210
+ information["local_rank"] = get_int_from_env(
211
+ ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
212
+ )
213
+ information["local_world_size"] = get_int_from_env(
214
+ ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
215
+ 1,
216
+ )
217
+ return CPUInformation(**information)
218
+
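+ # A minimal usage sketch (illustrative only): reading the distributed CPU layout from
+ # launcher-style environment variables (placeholder values).
+ def _example_cpu_distributed_information():  # pragma: no cover - illustrative only
+     os.environ.update({"RANK": "3", "WORLD_SIZE": "8", "LOCAL_RANK": "1", "LOCAL_WORLD_SIZE": "4"})
+     info = get_cpu_distributed_information()
+     assert (info.rank, info.world_size) == (3, 8)
+     assert (info.local_rank, info.local_world_size) == (1, 4)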
219
+
220
+ def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
221
+ """
222
+ Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the
223
+ affinity to set; ideally, you should use `utils.environment.set_numa_affinity` instead.
224
+
225
+ Args:
226
+ local_process_index (int):
227
+ The index of the current process on the current server.
228
+ verbose (bool, *optional*):
229
+ Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True.
230
+ """
231
+ if verbose is None:
232
+ verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False)
233
+ if torch.cuda.is_available():
234
+ from accelerate.utils import is_pynvml_available
235
+
236
+ if not is_pynvml_available():
237
+ raise ImportError(
238
+ "To set CPU affinity on CUDA GPUs the `pynvml` package must be available. (`pip install pynvml`)"
239
+ )
240
+ import pynvml as nvml
241
+
242
+ # The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py
243
+ nvml.nvmlInit()
244
+ num_elements = math.ceil(os.cpu_count() / 64)
245
+ handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index)
246
+ affinity_string = ""
247
+ for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements):
248
+ # assume nvml returns list of 64 bit ints
249
+ affinity_string = f"{j:064b}{affinity_string}"
250
+ affinity_list = [int(x) for x in affinity_string]
251
+ affinity_list.reverse() # so core 0 is the 0th element
252
+ affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0]
253
+ os.sched_setaffinity(0, affinity_to_set)
254
+ if verbose:
255
+ cpu_cores = os.sched_getaffinity(0)
256
+ logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}")
257
+
258
+
259
+ @lru_cache
260
+ def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
261
+ """
262
+ Assigns the current process to a specific NUMA node. Ideally most efficient when having at least 2 cpus per node.
263
+
264
+ This result is cached between calls. If you want to override it, please use
265
+ `accelerate.utils.environment.override_numa_affinity`.
266
+
267
+ Args:
268
+ local_process_index (int):
269
+ The index of the current process on the current server.
270
+ verbose (bool, *optional*):
271
+ Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will
272
+ default to True.
273
+ """
274
+ override_numa_affinity(local_process_index=local_process_index, verbose=verbose)
venv/lib/python3.10/site-packages/accelerate/utils/memory.py ADDED
@@ -0,0 +1,158 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ A collection of utilities for ensuring that training can always occur. Heavily influenced by the
17
+ [toma](https://github.com/BlackHC/toma) library.
18
+ """
19
+
20
+ import functools
21
+ import gc
22
+ import inspect
23
+
24
+ import torch
25
+
26
+ from .imports import is_mlu_available, is_mps_available, is_npu_available, is_xpu_available
27
+
28
+
29
+ def release_memory(*objects):
30
+ """
31
+ Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`.
32
+ Returned objects should be reassigned to the same variables.
33
+
34
+ Args:
35
+ objects (`Iterable`):
36
+ An iterable of objects
37
+ Returns:
38
+ A list of `None` objects to replace `objects`
39
+
40
+ Example:
41
+
42
+ ```python
43
+ >>> import torch
44
+ >>> from accelerate.utils import release_memory
45
+
46
+ >>> a = torch.ones(1000, 1000).cuda()
47
+ >>> b = torch.ones(1000, 1000).cuda()
48
+ >>> a, b = release_memory(a, b)
49
+ ```
50
+ """
51
+ if not isinstance(objects, list):
52
+ objects = list(objects)
53
+ for i in range(len(objects)):
54
+ objects[i] = None
55
+ gc.collect()
56
+ if is_xpu_available():
57
+ torch.xpu.empty_cache()
58
+ elif is_mlu_available():
59
+ torch.mlu.empty_cache()
60
+ elif is_npu_available():
61
+ torch.npu.empty_cache()
62
+ elif is_mps_available():
63
+ torch.mps.empty_cache()
64
+ else:
65
+ torch.cuda.empty_cache()
66
+ return objects
67
+
68
+
69
+ def should_reduce_batch_size(exception: Exception) -> bool:
70
+ """
71
+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory
72
+
73
+ Args:
74
+ exception (`Exception`):
75
+ An exception
76
+ """
77
+ _statements = [
78
+ "CUDA out of memory.", # CUDA OOM
79
+ "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
80
+ "DefaultCPUAllocator: can't allocate memory", # CPU OOM
81
+ ]
82
+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:
83
+ return any(err in exception.args[0] for err in _statements)
84
+ return False
85
+
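+ # A minimal sketch (illustrative only): the check only triggers for a `RuntimeError` whose
+ # single message argument matches one of the known out-of-memory patterns above.
+ def _example_should_reduce_batch_size():  # pragma: no cover - illustrative only
+     oom = RuntimeError("CUDA out of memory. Tried to allocate 20.00 MiB")
+     unrelated = ValueError("CUDA out of memory.")  # wrong exception type, so it is ignored
+     assert should_reduce_batch_size(oom) is True
+     assert should_reduce_batch_size(unrelated) is False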
86
+
87
+ def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
88
+ """
89
+ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or
90
+ CUDNN, the batch size is cut in half and passed to `function`
91
+
92
+ `function` must take in a `batch_size` parameter as its first argument.
93
+
94
+ Args:
95
+ function (`callable`, *optional*):
96
+ A function to wrap
97
+ starting_batch_size (`int`, *optional*):
98
+ The batch size to try and fit into memory
99
+
100
+ Example:
101
+
102
+ ```python
103
+ >>> from accelerate.utils import find_executable_batch_size
104
+
105
+
106
+ >>> @find_executable_batch_size(starting_batch_size=128)
107
+ ... def train(batch_size, model, optimizer):
108
+ ... ...
109
+
110
+
111
+ >>> train(model, optimizer)
112
+ ```
113
+ """
114
+ if function is None:
115
+ return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
116
+
117
+ batch_size = starting_batch_size
118
+
119
+ def decorator(*args, **kwargs):
120
+ nonlocal batch_size
121
+ gc.collect()
122
+ if is_xpu_available():
123
+ torch.xpu.empty_cache()
124
+ elif is_mlu_available():
125
+ torch.mlu.empty_cache()
126
+ elif is_npu_available():
127
+ torch.npu.empty_cache()
128
+ else:
129
+ torch.cuda.empty_cache()
130
+ params = list(inspect.signature(function).parameters.keys())
131
+ # Guard against user error
132
+ if len(params) < (len(args) + 1):
133
+ arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
134
+ raise TypeError(
135
+ f"Batch size was passed into `{function.__name__}` as the first argument when called."
136
+ f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
137
+ )
138
+ while True:
139
+ if batch_size == 0:
140
+ raise RuntimeError("No executable batch size found, reached zero.")
141
+ try:
142
+ return function(batch_size, *args, **kwargs)
143
+ except Exception as e:
144
+ if should_reduce_batch_size(e):
145
+ gc.collect()
146
+ if is_xpu_available():
147
+ torch.xpu.empty_cache()
148
+ elif is_mlu_available():
149
+ torch.mlu.empty_cache()
150
+ elif is_npu_available():
151
+ torch.npu.empty_cache()
152
+ else:
153
+ torch.cuda.empty_cache()
154
+ batch_size //= 2
155
+ else:
156
+ raise
157
+
158
+ return decorator
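+
+
+ # A minimal sketch (illustrative only): the decorator halves the batch size until the wrapped
+ # function stops raising OOM-style errors. The threshold of 32 is a placeholder used purely to
+ # simulate an out-of-memory condition without a GPU.
+ def _example_find_executable_batch_size():  # pragma: no cover - illustrative only
+     @find_executable_batch_size(starting_batch_size=128)
+     def _train(batch_size):
+         if batch_size > 32:
+             raise RuntimeError("CUDA out of memory.")  # recognized by `should_reduce_batch_size`
+         return batch_size
+
+     return _train()  # tries 128 -> 64 -> 32 and returns 32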
venv/lib/python3.10/site-packages/accelerate/utils/offload.py ADDED
@@ -0,0 +1,213 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from collections.abc import Mapping
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ from safetensors import safe_open
23
+
24
+
25
+ def offload_weight(weight, weight_name, offload_folder, index=None):
26
+ dtype = None
27
+ # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16.
28
+ if str(weight.dtype) == "torch.bfloat16":
29
+ # Need to reinterpret the underlying data as int16 since NumPy does not handle bfloat16s.
30
+ weight = weight.view(torch.int16)
31
+ dtype = "bfloat16"
32
+ array = weight.cpu().numpy()
33
+ tensor_file = os.path.join(offload_folder, f"{weight_name}.dat")
34
+ if index is not None:
35
+ if dtype is None:
36
+ dtype = str(array.dtype)
37
+ index[weight_name] = {"dtype": dtype, "shape": list(array.shape)}
38
+ if array.ndim == 0:
39
+ array = array[None]
40
+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape)
41
+ file_array[:] = array[:]
42
+ file_array.flush()
43
+ return index
44
+
45
+
46
+ def load_offloaded_weight(weight_file, weight_info):
47
+ shape = tuple(weight_info["shape"])
48
+ if shape == ():
49
+ # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor
50
+ shape = (1,)
51
+
52
+ dtype = weight_info["dtype"]
53
+ if dtype == "bfloat16":
54
+ # NumPy does not support bfloat16, so this was saved as an int16
55
+ dtype = "int16"
56
+
57
+ weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r")
58
+
59
+ if len(weight_info["shape"]) == 0:
60
+ weight = weight[0]
61
+ weight = torch.tensor(weight)
62
+ if weight_info["dtype"] == "bfloat16":
63
+ weight = weight.view(torch.bfloat16)
64
+
65
+ return weight
66
+
67
+
68
+ def save_offload_index(index, offload_folder):
69
+ if index is None or len(index) == 0:
70
+ # Nothing to save
71
+ return
72
+
73
+ offload_index_file = os.path.join(offload_folder, "index.json")
74
+ if os.path.isfile(offload_index_file):
75
+ with open(offload_index_file, encoding="utf-8") as f:
76
+ current_index = json.load(f)
77
+ else:
78
+ current_index = {}
79
+ current_index.update(index)
80
+
81
+ with open(offload_index_file, "w", encoding="utf-8") as f:
82
+ json.dump(current_index, f, indent=2)
83
+
84
+
85
+ def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):
86
+ """
87
+ Offload a state dict in a given folder.
88
+
89
+ Args:
90
+ save_dir (`str` or `os.PathLike`):
91
+ The directory in which to offload the state dict.
92
+ state_dict (`Dict[str, torch.Tensor]`):
93
+ The dictionary of tensors to offload.
94
+ """
95
+ os.makedirs(save_dir, exist_ok=True)
96
+ index = {}
97
+ for name, parameter in state_dict.items():
98
+ index = offload_weight(parameter, name, save_dir, index=index)
99
+
100
+ # Update index
101
+ save_offload_index(index, save_dir)
102
+
103
+
104
+ class PrefixedDataset(Mapping):
105
+ """
106
+ Will access keys in a given dataset by adding a prefix.
107
+
108
+ Args:
109
+ dataset (`Mapping`): Any map with string keys.
110
+ prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
111
+ """
112
+
113
+ def __init__(self, dataset: Mapping, prefix: str):
114
+ self.dataset = dataset
115
+ self.prefix = prefix
116
+
117
+ def __getitem__(self, key):
118
+ return self.dataset[f"{self.prefix}{key}"]
119
+
120
+ def __iter__(self):
121
+ return iter([key for key in self.dataset if key.startswith(self.prefix)])
122
+
123
+ def __len__(self):
124
+ return len(self.dataset)
125
+
126
+
127
+ class OffloadedWeightsLoader(Mapping):
128
+ """
129
+ A collection that loads weights stored in a given state dict or memory-mapped on disk.
130
+
131
+ Args:
132
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
133
+ A dictionary parameter name to tensor.
134
+ save_folder (`str` or `os.PathLike`, *optional*):
135
+ The directory in which the weights are stored (by `offload_state_dict` for instance).
136
+ index (`Dict`, *optional*):
137
+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
138
+ to the index saved in `save_folder`.
139
+ """
140
+
141
+ def __init__(
142
+ self,
143
+ state_dict: Dict[str, torch.Tensor] = None,
144
+ save_folder: Optional[Union[str, os.PathLike]] = None,
145
+ index: Mapping = None,
146
+ device=None,
147
+ ):
148
+ if state_dict is None and save_folder is None and index is None:
149
+ raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.")
150
+
151
+ self.state_dict = {} if state_dict is None else state_dict
152
+ self.save_folder = save_folder
153
+ if index is None and save_folder is not None:
154
+ with open(os.path.join(save_folder, "index.json")) as f:
155
+ index = json.load(f)
156
+ self.index = {} if index is None else index
157
+ self.all_keys = list(self.state_dict.keys())
158
+ self.all_keys.extend([key for key in self.index if key not in self.all_keys])
159
+ self.device = device
160
+
161
+ def __getitem__(self, key: str):
162
+ # State dict gets priority
163
+ if key in self.state_dict:
164
+ return self.state_dict[key]
165
+ weight_info = self.index[key]
166
+ if weight_info.get("safetensors_file") is not None:
167
+ device = "cpu" if self.device is None else self.device
168
+ tensor = None
169
+ try:
170
+ with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f:
171
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
172
+ except TypeError:
173
+ # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first
174
+ with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f:
175
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
176
+
177
+ if "dtype" in weight_info:
178
+ tensor = tensor.to(getattr(torch, weight_info["dtype"]))
179
+
180
+ if tensor.device != torch.device(device):
181
+ tensor = tensor.to(device)
182
+ return tensor
183
+
184
+ weight_file = os.path.join(self.save_folder, f"{key}.dat")
185
+ return load_offloaded_weight(weight_file, weight_info)
186
+
187
+ def __iter__(self):
188
+ return iter(self.all_keys)
189
+
190
+ def __len__(self):
191
+ return len(self.all_keys)
192
+
193
+
194
+ def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):
195
+ """
196
+ Extract the sub state-dict corresponding to a list of given submodules.
197
+
198
+ Args:
199
+ state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from.
200
+ submodule_names (`List[str]`): The list of submodule names we want to extract.
201
+ """
202
+ result = {}
203
+ for module_name in submodule_names:
204
+ # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the
205
+ # submodules whose names merely begin with module_name (transformers.h.1 and transformers.h.10 for instance)
206
+ result.update(
207
+ {
208
+ key: param
209
+ for key, param in state_dict.items()
210
+ if key == module_name or key.startswith(module_name + ".")
211
+ }
212
+ )
213
+ return result
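+
+
+ # A minimal sketch (illustrative only): "h.1" matches "h.1.weight" but not "h.10.weight",
+ # because a key must equal the submodule name or start with the name followed by a dot.
+ def _example_extract_submodules_state_dict():  # pragma: no cover - illustrative only
+     state_dict = {"h.1.weight": torch.zeros(1), "h.10.weight": torch.zeros(1), "ln_f.weight": torch.zeros(1)}
+     extracted = extract_submodules_state_dict(state_dict, ["h.1"])
+     assert list(extracted) == ["h.1.weight"]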
venv/lib/python3.10/site-packages/accelerate/utils/operations.py ADDED
@@ -0,0 +1,851 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ A set of basic tensor ops compatible with TPU, GPU, and multi-GPU setups
16
+ """
17
+
18
+ import pickle
19
+ import warnings
20
+ from functools import update_wrapper, wraps
21
+ from typing import Any, Mapping
22
+
23
+ import torch
24
+
25
+ from ..state import PartialState
26
+ from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
27
+ from .dataclasses import DistributedType, TensorInformation
28
+ from .imports import (
29
+ is_npu_available,
30
+ is_torch_distributed_available,
31
+ is_torch_version,
32
+ is_torch_xla_available,
33
+ is_xpu_available,
34
+ )
35
+
36
+
37
+ if is_torch_xla_available():
38
+ import torch_xla.core.xla_model as xm
39
+
40
+ if is_torch_distributed_available():
41
+ from torch.distributed import ReduceOp
42
+
43
+
44
+ def is_torch_tensor(tensor):
45
+ return isinstance(tensor, torch.Tensor)
46
+
47
+
48
+ def is_torch_xpu_tensor(tensor):
49
+ return isinstance(
50
+ tensor,
+ (
51
+ torch.xpu.FloatTensor,
52
+ torch.xpu.ByteTensor,
53
+ torch.xpu.IntTensor,
54
+ torch.xpu.LongTensor,
55
+ torch.xpu.HalfTensor,
56
+ torch.xpu.DoubleTensor,
57
+ torch.xpu.BFloat16Tensor,
+ ),
58
+ )
59
+
60
+
61
+ def is_tensor_information(tensor_info):
62
+ return isinstance(tensor_info, TensorInformation)
63
+
64
+
65
+ def is_namedtuple(data):
66
+ """
67
+ Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
68
+ `namedtuple` perfectly.
69
+ """
70
+ return isinstance(data, tuple) and hasattr(data, "_asdict") and hasattr(data, "_fields")
71
+
72
+
73
+ def honor_type(obj, generator):
74
+ """
75
+ Cast a generator to the same type as obj (list, tuple, or namedtuple)
76
+ """
77
+ # Some objects may not be able to instantiate from a generator directly
78
+ if is_namedtuple(obj):
79
+ return type(obj)(*list(generator))
80
+ else:
81
+ return type(obj)(generator)
82
+
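+ # A minimal sketch (illustrative only): casting a generator back to the container's type,
+ # including the special case for namedtuples, which must be rebuilt from positional args.
+ def _example_honor_type():  # pragma: no cover - illustrative only
+     from collections import namedtuple
+
+     Batch = namedtuple("Batch", ["x", "y"])
+     doubled = honor_type(Batch(x=1, y=2), (value * 2 for value in Batch(x=1, y=2)))
+     assert isinstance(doubled, Batch) and doubled == Batch(x=2, y=4)
+     assert honor_type([1, 2], (v * 2 for v in [1, 2])) == [2, 4]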
83
+
84
+ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
85
+ """
86
+ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
87
+
88
+ Args:
89
+ func (`callable`):
90
+ The function to recursively apply.
91
+ data (nested list/tuple/dictionary of `main_type`):
92
+ The data on which to apply `func`
93
+ *args:
94
+ Positional arguments that will be passed to `func` when applied on the unpacked data.
95
+ main_type (`type`, *optional*, defaults to `torch.Tensor`):
96
+ The base type of the objects to which apply `func`.
97
+ error_on_other_type (`bool`, *optional*, defaults to `False`):
98
+ Whether to return an error or not if after unpacking `data`, we get on an object that is not of type
99
+ `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged.
100
+ **kwargs (additional keyword arguments, *optional*):
101
+ Keyword arguments that will be passed to `func` when applied on the unpacked data.
102
+
103
+ Returns:
104
+ The same data structure as `data` with `func` applied to every object of type `main_type`.
105
+ """
106
+ if isinstance(data, (tuple, list)):
107
+ return honor_type(
108
+ data,
109
+ (
110
+ recursively_apply(
111
+ func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
112
+ )
113
+ for o in data
114
+ ),
115
+ )
116
+ elif isinstance(data, Mapping):
117
+ return type(data)(
118
+ {
119
+ k: recursively_apply(
120
+ func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
121
+ )
122
+ for k, v in data.items()
123
+ }
124
+ )
125
+ elif test_type(data):
126
+ return func(data, *args, **kwargs)
127
+ elif error_on_other_type:
128
+ raise TypeError(
129
+ f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of "
130
+ f"objects that are valid for `{test_type.__name__}` should be passed."
131
+ )
132
+ return data
133
+
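+ # A minimal usage sketch (illustrative only): the nesting of tuples/lists/dicts is preserved
+ # while `func` is applied to every tensor leaf.
+ def _example_recursively_apply():  # pragma: no cover - illustrative only
+     data = {"inputs": (torch.ones(2), [torch.ones(3)]), "label": torch.ones(1)}
+     doubled = recursively_apply(lambda t: t * 2, data)
+     assert torch.equal(doubled["inputs"][0], torch.full((2,), 2.0))
+     assert torch.equal(doubled["inputs"][1][0], torch.full((3,), 2.0))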
134
+
135
+ def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
136
+ """
137
+ Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
138
+
139
+ Args:
140
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
141
+ The data to send to a given device.
142
+ device (`torch.device`):
143
+ The device to send the data to.
144
+
145
+ Returns:
146
+ The same data structure as `tensor` with all tensors sent to the proper device.
147
+ """
148
+ if is_torch_tensor(tensor) or hasattr(tensor, "to"):
149
+ # `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)).
150
+ if device == "npu":
151
+ device = "npu:0"
152
+ if device == "xpu":
153
+ device = "xpu:0"
154
+ # TODO: torch_mlu LongTensor.to(<int num>) has bugs, we will fix this later.
155
+ if is_torch_tensor(tensor) and tensor.device.type in ["mlu"] and tensor.dtype in [torch.int64]:
156
+ tensor = tensor.cpu()
157
+ try:
158
+ return tensor.to(device, non_blocking=non_blocking)
159
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
160
+ return tensor.to(device)
161
+ except AssertionError as error:
162
+ # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
163
+ # This call is inside the try-block since is_npu_available is not supported by torch.compile.
164
+ if is_npu_available():
165
+ if isinstance(device, int):
166
+ device = f"npu:{device}"
167
+ else:
168
+ raise error
169
+ except Exception as error:
170
+ if is_xpu_available():
171
+ if isinstance(device, int):
172
+ device = f"xpu:{device}"
173
+ else:
174
+ raise error
175
+ try:
176
+ return tensor.to(device, non_blocking=non_blocking)
177
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
178
+ return tensor.to(device)
179
+ elif isinstance(tensor, (tuple, list)):
180
+ return honor_type(
181
+ tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
182
+ )
183
+ elif isinstance(tensor, Mapping):
184
+ if isinstance(skip_keys, str):
185
+ skip_keys = [skip_keys]
186
+ elif skip_keys is None:
187
+ skip_keys = []
188
+ return type(tensor)(
189
+ {
190
+ k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys)
191
+ for k, t in tensor.items()
192
+ }
193
+ )
194
+ else:
195
+ return tensor
196
+
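+ # A minimal usage sketch (illustrative only): `skip_keys` leaves selected entries untouched
+ # while the rest of the structure is moved to the target device.
+ def _example_send_to_device():  # pragma: no cover - illustrative only
+     batch = {"input_ids": torch.ones(2, 4), "metadata": torch.zeros(2)}
+     moved = send_to_device(batch, torch.device("cpu"), skip_keys="metadata")
+     assert moved["input_ids"].device.type == "cpu"
+     assert moved["metadata"] is batch["metadata"]  # skipped entries are passed through unchanged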
197
+
198
+ def get_data_structure(data):
199
+ """
200
+ Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
201
+
202
+ Args:
203
+ data (nested list/tuple/dictionary of `torch.Tensor`):
204
+ The data to send to analyze.
205
+
206
+ Returns:
207
+ The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
208
+ """
209
+
210
+ def _get_data_structure(tensor):
211
+ return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
212
+
213
+ return recursively_apply(_get_data_structure, data)
214
+
215
+
216
+ def get_shape(data):
217
+ """
218
+ Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
219
+
220
+ Args:
221
+ data (nested list/tuple/dictionary of `torch.Tensor`):
222
+ The data to send to analyze.
223
+
224
+ Returns:
225
+ The same data structure as `data` with lists of tensor shapes instead of tensors.
226
+ """
227
+
228
+ def _get_shape(tensor):
229
+ return list(tensor.shape)
230
+
231
+ return recursively_apply(_get_shape, data)
232
+
233
+
234
+ def initialize_tensors(data_structure):
235
+ """
236
+ Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
237
+
238
+ Returns:
239
+ The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
240
+ """
241
+
242
+ def _initialize_tensor(tensor_info):
243
+ return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
244
+
245
+ return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
246
+
247
+
248
+ def find_batch_size(data):
249
+ """
250
+ Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
251
+
252
+ Args:
253
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
254
+
255
+ Returns:
256
+ `int`: The batch size.
257
+ """
258
+ if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
259
+ raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
260
+
261
+ if isinstance(data, (tuple, list)):
262
+ return find_batch_size(data[0])
263
+ elif isinstance(data, Mapping):
264
+ for k in data.keys():
265
+ return find_batch_size(data[k])
266
+ elif not isinstance(data, torch.Tensor):
267
+ raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
268
+ return data.shape[0]
269
+
270
+
271
+ def ignorant_find_batch_size(data):
272
+ """
273
+ Same as [`utils.operations.find_batch_size`] except will ignore if `ValueError` and `TypeErrors` are raised
274
+
275
+ Args:
276
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
277
+
278
+ Returns:
279
+ `int`: The batch size.
280
+ """
281
+ try:
282
+ return find_batch_size(data)
283
+ except (ValueError, TypeError):
284
+ pass
285
+ return None
286
+
287
+
288
+ def listify(data):
289
+ """
290
+ Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
291
+
292
+ Args:
293
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers.
294
+
295
+ Returns:
296
+ The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
297
+ """
298
+
299
+ def _convert_to_list(tensor):
300
+ tensor = tensor.detach().cpu()
301
+ if tensor.dtype == torch.bfloat16:
302
+ # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
303
+ # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
304
+ # Until NumPy adds bfloat16, we must convert to float32.
305
+ tensor = tensor.to(torch.float32)
306
+ return tensor.tolist()
307
+
308
+ return recursively_apply(_convert_to_list, data)
309
+
310
+
311
+ def _tpu_gather(tensor):
312
+ def _tpu_gather_one(tensor):
313
+ if tensor.ndim == 0:
314
+ tensor = tensor.clone()[None]
315
+
316
+ # Can only gather contiguous tensors
317
+ if not tensor.is_contiguous():
318
+ tensor = tensor.contiguous()
319
+ return xm.all_gather(tensor)
320
+
321
+ res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
322
+ xm.mark_step()
323
+ return res
324
+
325
+
326
+ def _gpu_gather(tensor):
327
+ state = PartialState()
328
+ if is_torch_version(">=", "1.13"):
329
+ gather_op = torch.distributed.all_gather_into_tensor
330
+ else:
331
+ gather_op = torch.distributed._all_gather_base
332
+
333
+ def _gpu_gather_one(tensor):
334
+ if tensor.ndim == 0:
335
+ tensor = tensor.clone()[None]
336
+
337
+ # Can only gather contiguous tensors
338
+ if not tensor.is_contiguous():
339
+ tensor = tensor.contiguous()
340
+
341
+ if state.backend is not None and state.backend != "gloo":
342
+ # We use `empty` as `all_gather_into_tensor` slightly
343
+ # differs from `all_gather` for better efficiency,
344
+ # and we rely on the number of items in the tensor
345
+ # rather than its direct shape
346
+ output_tensors = torch.empty(
347
+ state.num_processes * tensor.numel(),
348
+ dtype=tensor.dtype,
349
+ device=state.device,
350
+ )
351
+ gather_op(output_tensors, tensor)
352
+ return output_tensors.view(-1, *tensor.size()[1:])
353
+ else:
354
+ # a backend of `None` is always CPU
355
+ # also gloo does not support `all_gather_into_tensor`,
356
+ # which will result in a larger memory overhead for the op
357
+ output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
358
+ torch.distributed.all_gather(output_tensors, tensor)
359
+ return torch.cat(output_tensors, dim=0)
360
+
361
+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
362
+
363
+
364
+ class DistributedOperationException(Exception):
365
+ """
366
+ An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
367
+ tensors.
368
+ """
369
+
370
+ pass
371
+
372
+
373
+ def verify_operation(function):
374
+ """
375
+ Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`.
376
+ """
377
+
378
+ @wraps(function)
379
+ def wrapper(*args, **kwargs):
380
+ if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
381
+ return function(*args, **kwargs)
382
+ operation = f"{function.__module__}.{function.__name__}"
383
+ if "tensor" in kwargs:
384
+ tensor = kwargs["tensor"]
385
+ else:
386
+ tensor = args[0]
387
+ if PartialState().device.type != find_device(tensor).type:
388
+ raise DistributedOperationException(
389
+ f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. "
390
+ f"Please move it to the {PartialState().device.type} before calling {operation}."
391
+ )
392
+ shapes = get_shape(tensor)
393
+ output = gather_object([shapes])
394
+ if output[0] is not None:
395
+ are_same = output.count(output[0]) == len(output)
396
+ if not are_same:
397
+ process_shape_str = "\n - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)])
398
+ raise DistributedOperationException(
399
+ f"Cannot apply desired operation due to shape mismatches. "
400
+ "All shapes across devices must be valid."
401
+ f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}"
402
+ )
403
+ return function(*args, **kwargs)
404
+
405
+ return wrapper
406
+
407
+
408
+ def chained_operation(function):
409
+ """
410
+ Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing
411
+ `DistributedOperationException`.
412
+ """
413
+
414
+ @wraps(function)
415
+ def wrapper(*args, **kwargs):
416
+ try:
417
+ return function(*args, **kwargs)
418
+ except DistributedOperationException as e:
419
+ operation = f"{function.__module__}.{function.__name__}"
420
+ raise DistributedOperationException(
421
+ f"Error found while calling `{operation}`. Please see the earlier error for more details."
422
+ ) from e
423
+
424
+ return wrapper
425
+
426
+
427
+ @verify_operation
428
+ def gather(tensor):
429
+ """
430
+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
431
+
432
+ Args:
433
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
434
+ The data to gather.
435
+
436
+ Returns:
437
+ The same data structure as `tensor` with all tensors sent to the proper device.
438
+ """
439
+ if PartialState().distributed_type == DistributedType.XLA:
440
+ return _tpu_gather(tensor)
441
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
442
+ return _gpu_gather(tensor)
443
+ else:
444
+ return tensor
445
+
446
+
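A hedged usage sketch for `gather` (editor's addition), assuming the script is started with `accelerate launch --num_processes 2`; in a single-process run the tensor is simply returned unchanged.

```python
import torch
from accelerate.state import PartialState
from accelerate.utils.operations import gather

state = PartialState()
# Each process contributes a (4, 8) tensor filled with its rank;
# after `gather`, every process holds a (4 * num_processes, 8) tensor.
local = torch.full((4, 8), float(state.process_index), device=state.device)
gathered = gather(local)
print(state.process_index, gathered.shape)
```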
447
+ def _gpu_gather_object(object: Any):
448
+ output_objects = [None for _ in range(PartialState().num_processes)]
449
+ torch.distributed.all_gather_object(output_objects, object)
450
+ # all_gather_object returns a list of lists, so we need to flatten it
451
+ return [x for y in output_objects for x in y]
452
+
453
+
454
+ def gather_object(object: Any):
455
+ """
456
+ Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
457
+
458
+ Args:
459
+ object (nested list/tuple/dictionary of picklable object):
460
+ The data to gather.
461
+
462
+ Returns:
463
+ The same data structure as `object` with all the objects sent to every device.
464
+ """
465
+ if PartialState().distributed_type == DistributedType.XLA:
466
+ raise NotImplementedError("gather objects in TPU is not supported")
467
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
468
+ return _gpu_gather_object(object)
469
+ else:
470
+ return object
471
+
472
+
473
+ def _gpu_broadcast(data, src=0):
474
+ def _gpu_broadcast_one(tensor, src=0):
475
+ torch.distributed.broadcast(tensor, src=src)
476
+ return tensor
477
+
478
+ return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
479
+
480
+
481
+ def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
482
+ if isinstance(tensor, (list, tuple)):
483
+ return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
484
+ elif isinstance(tensor, Mapping):
485
+ return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
486
+ return xm.mesh_reduce(name, tensor, lambda x: x[src])
487
+
488
+
489
+ TENSOR_TYPE_TO_INT = {
490
+ torch.float: 1,
491
+ torch.double: 2,
492
+ torch.half: 3,
493
+ torch.bfloat16: 4,
494
+ torch.uint8: 5,
495
+ torch.int8: 6,
496
+ torch.int16: 7,
497
+ torch.int32: 8,
498
+ torch.int64: 9,
499
+ torch.bool: 10,
500
+ }
501
+
502
+ TENSOR_INT_TO_DTYPE = {v: k for k, v in TENSOR_TYPE_TO_INT.items()}
503
+
504
+
505
+ def gather_tensor_shape(tensor):
506
+ """
507
+ Grabs the shape of a `tensor` that is only available on one process and returns a tensor of its shape
508
+ """
509
+ # Allocate a large buffer to store the shape (and dtype code) of the tensor
510
+ max_tensor_dimension = 2**20
511
+ state = PartialState()
512
+ base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device)
513
+
514
+ # Since PyTorch can't just send a tensor to another GPU without
515
+ # knowing its size, we store the size of the tensor with data
516
+ # in an allocation
517
+ if tensor is not None:
518
+ shape = tensor.shape
519
+ tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype]
520
+ base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int)
521
+ # Perform a reduction to copy the size data onto all GPUs
522
+ base_tensor = reduce(base_tensor, reduction="sum")
523
+ base_tensor = base_tensor[base_tensor.nonzero()]
524
+ # The last non-zero data contains the coded dtype the source tensor is
525
+ dtype = int(base_tensor[-1:][0])
526
+ base_tensor = base_tensor[:-1]
527
+ return base_tensor, dtype
528
+
529
+
530
+ def copy_tensor_to_devices(tensor=None) -> torch.Tensor:
531
+ """
532
+ Copies a tensor that only exists on a single device and broadcasts it to other devices. Differs from `broadcast` as
533
+ each worker doesn't need to know its shape when used (and tensor can be `None`)
534
+
535
+ Args:
536
+ tensor (`torch.tensor`):
537
+ The tensor that should be sent to all devices. It must be defined on only a single device; on all others it
538
+ should be `None`.
539
+ """
540
+ state = PartialState()
541
+ shape, dtype = gather_tensor_shape(tensor)
542
+ if tensor is None:
543
+ tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device)
544
+ return reduce(tensor, reduction="sum")
545
+
546
+
547
+ @verify_operation
548
+ def broadcast(tensor, from_process: int = 0):
549
+ """
550
+ Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
551
+
552
+ Args:
553
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
554
+ The data to gather.
555
+ from_process (`int`, *optional*, defaults to 0):
556
+ The process from which to send the data
557
+
558
+ Returns:
559
+ The same data structure as `tensor` with all tensors broadcasted to the proper device.
560
+ """
561
+ if PartialState().distributed_type == DistributedType.XLA:
562
+ return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast")
563
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
564
+ return _gpu_broadcast(tensor, src=from_process)
565
+ else:
566
+ return tensor
567
+
568
+
569
+ def broadcast_object_list(object_list, from_process: int = 0):
570
+ """
571
+ Broadcast a list of picklable objects from one process to the others.
572
+
573
+ Args:
574
+ object_list (list of picklable objects):
575
+ The list of objects to broadcast. This list will be modified inplace.
576
+ from_process (`int`, *optional*, defaults to 0):
577
+ The process from which to send the data.
578
+
579
+ Returns:
580
+ The same list containing the objects from process 0.
581
+ """
582
+ if PartialState().distributed_type == DistributedType.XLA:
583
+ for i, obj in enumerate(object_list):
584
+ object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process])
585
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
586
+ torch.distributed.broadcast_object_list(object_list, src=from_process)
587
+ return object_list
588
+
589
+
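A small sketch of `broadcast_object_list` (editor's addition, assuming `accelerate launch`): only the source process needs to build the object, the others pass a placeholder of the same length.

```python
from accelerate.state import PartialState
from accelerate.utils.operations import broadcast_object_list

state = PartialState()
# A hypothetical run configuration created only on process 0.
payload = [{"lr": 3e-4, "run_name": "demo"}] if state.is_main_process else [None]
payload = broadcast_object_list(payload, from_process=0)
print(state.process_index, payload[0])  # every rank now sees the same dict
```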
590
+ def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
591
+ """
592
+ Recursively takes a slice in a nested list/tuple/dictionary of tensors.
593
+
594
+ Args:
595
+ data (nested list/tuple/dictionary of `torch.Tensor`):
596
+ The data to slice.
597
+ tensor_slice (`slice`):
598
+ The slice to take.
599
+
600
+ Returns:
601
+ The same data structure as `data` with all the tensors sliced.
602
+ """
603
+
604
+ def _slice_tensor(tensor, tensor_slice):
605
+ return tensor[tensor_slice]
606
+
607
+ return recursively_apply(_slice_tensor, data, tensor_slice)
608
+
609
+
610
+ def concatenate(data, dim=0):
611
+ """
612
+ Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
613
+
614
+ Args:
615
+ data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
616
+ The data to concatenate.
617
+ dim (`int`, *optional*, defaults to 0):
618
+ The dimension on which to concatenate.
619
+
620
+ Returns:
621
+ The same data structure as `data` with all the tensors concatenated.
622
+ """
623
+ if isinstance(data[0], (tuple, list)):
624
+ return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))
625
+ elif isinstance(data[0], Mapping):
626
+ return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})
627
+ elif not isinstance(data[0], torch.Tensor):
628
+ raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
629
+ return torch.cat(data, dim=dim)
630
+
631
+
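`concatenate` splices a list of same-structured outputs, e.g. one per evaluation step; an editor's sketch with illustrative shapes:

```python
import torch
from accelerate.utils.operations import concatenate

step_outputs = [
    {"logits": torch.zeros(2, 5), "loss": torch.zeros(1)},
    {"logits": torch.ones(3, 5), "loss": torch.ones(1)},
]
merged = concatenate(step_outputs, dim=0)
print(merged["logits"].shape, merged["loss"].shape)  # torch.Size([5, 5]) torch.Size([2])
```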
632
+ class CannotPadNestedTensorWarning(UserWarning):
633
+ pass
634
+
635
+
636
+ @chained_operation
637
+ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
638
+ """
639
+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
640
+ can safely be gathered.
641
+
642
+ Args:
643
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
644
+ The data to gather.
645
+ dim (`int`, *optional*, defaults to 0):
646
+ The dimension on which to pad.
647
+ pad_index (`int`, *optional*, defaults to 0):
648
+ The value with which to pad.
649
+ pad_first (`bool`, *optional*, defaults to `False`):
650
+ Whether to pad at the beginning or the end.
651
+ """
652
+
653
+ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
654
+ if getattr(tensor, "is_nested", False):
655
+ warnings.warn(
656
+ "Cannot pad nested tensors without more information. Leaving unprocessed.",
657
+ CannotPadNestedTensorWarning,
658
+ )
659
+ return tensor
660
+ if dim >= len(tensor.shape):
661
+ return tensor
662
+
663
+ # Gather all sizes
664
+ size = torch.tensor(tensor.shape, device=tensor.device)[None]
665
+ sizes = gather(size).cpu()
666
+ # Then pad to the maximum size
667
+ max_size = max(s[dim] for s in sizes)
668
+ if max_size == tensor.shape[dim]:
669
+ return tensor
670
+
671
+ old_size = tensor.shape
672
+ new_size = list(old_size)
673
+ new_size[dim] = max_size
674
+ new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
675
+ if pad_first:
676
+ indices = tuple(
677
+ slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))
678
+ )
679
+ else:
680
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
681
+ new_tensor[indices] = tensor
682
+ return new_tensor
683
+
684
+ return recursively_apply(
685
+ _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
686
+ )
687
+
688
+
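A typical pairing (editor's sketch, assuming `accelerate launch`): pad variable-length sequences before gathering, since `gather` needs identical shapes on every rank.

```python
import torch
from accelerate.state import PartialState
from accelerate.utils.operations import gather, pad_across_processes

state = PartialState()
# Hypothetical tokenized batch whose length differs per process.
seq_len = 5 + state.process_index
tokens = torch.ones(2, seq_len, dtype=torch.long, device=state.device)
tokens = pad_across_processes(tokens, dim=1, pad_index=0)
all_tokens = gather(tokens)
print(state.process_index, all_tokens.shape)
```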
689
+ def pad_input_tensors(tensor, batch_size, num_processes, dim=0):
690
+ """
691
+ Takes a `tensor` of arbitrary size and pads it so that its first dimension can be split evenly across `num_processes`.
692
+
693
+ New tensors are just the last input repeated.
694
+
695
+ E.g.:
696
+ Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4])
697
+
698
+ """
699
+
700
+ def _pad_input_tensors(tensor, batch_size, num_processes, dim=0):
701
+ remainder = batch_size // num_processes
702
+ last_inputs = batch_size - (remainder * num_processes)
703
+ if batch_size // num_processes == 0:
704
+ to_pad = num_processes - batch_size
705
+ else:
706
+ to_pad = num_processes - (batch_size // num_processes)
707
+ # In the rare case that `to_pad` is negative,
708
+ # we need to pad the last inputs - the found `to_pad`
709
+ if last_inputs > to_pad & to_pad < 1:
710
+ to_pad = last_inputs - to_pad
711
+ old_size = tensor.shape
712
+ new_size = list(old_size)
713
+ new_size[0] = batch_size + to_pad
714
+ new_tensor = tensor.new_zeros(tuple(new_size))
715
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
716
+ new_tensor[indices] = tensor
717
+ return new_tensor
718
+
719
+ return recursively_apply(
720
+ _pad_input_tensors,
721
+ tensor,
722
+ error_on_other_type=True,
723
+ batch_size=batch_size,
724
+ num_processes=num_processes,
725
+ dim=dim,
726
+ )
727
+
728
+
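Following the docstring's example (editor's sketch): a batch of 3 split over 4 processes is padded up so the first dimension divides evenly.

```python
import torch
from accelerate.utils.operations import pad_input_tensors

batch = torch.randn(3, 4, 4)
padded = pad_input_tensors(batch, batch_size=3, num_processes=4)
print(padded.shape)  # torch.Size([4, 4, 4])
```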
729
+ @verify_operation
730
+ def reduce(tensor, reduction="mean", scale=1.0):
731
+ """
732
+ Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using a
733
+ given reduction operation.
734
+
735
+ Args:
736
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
737
+ The data to reduce.
738
+ reduction (`str`, *optional*, defaults to `"mean"`):
739
+ A reduction method. Can be one of "mean", "sum", or "none".
740
+ scale (`float`, *optional*):
741
+ A default scaling value to be applied after the reduce, only valid on XLA.
742
+
743
+ Returns:
744
+ The same data structure as `data` with all the tensors reduced.
745
+ """
746
+
747
+ def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
748
+ state = PartialState()
749
+ cloned_tensor = tensor.clone()
750
+ if state.distributed_type == DistributedType.NO:
751
+ return cloned_tensor
752
+ if state.distributed_type == DistributedType.XLA:
753
+ # Some processes may have different HLO graphs than other
754
+ # processes, for example in the breakpoint API
755
+ # accelerator.set_trigger(). Use mark_step to make HLOs
756
+ # the same on all processes.
757
+ xm.mark_step()
758
+ xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale)
759
+ xm.mark_step()
760
+ elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
761
+ torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
762
+ if reduction == "mean":
763
+ cloned_tensor /= state.num_processes
764
+ return cloned_tensor
765
+
766
+ return recursively_apply(
767
+ _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
768
+ )
769
+
770
+
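A common use of `reduce` (editor's sketch, assuming `accelerate launch`): averaging a per-process loss so every rank logs the same value.

```python
import torch
from accelerate.state import PartialState
from accelerate.utils.operations import reduce

state = PartialState()
local_loss = torch.tensor(float(state.process_index), device=state.device)
mean_loss = reduce(local_loss, reduction="mean")
print(state.process_index, mean_loss.item())
```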
771
+ def convert_to_fp32(tensor):
772
+ """
773
+ Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
774
+
775
+ Args:
776
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
777
+ The data to convert from FP16/BF16 to FP32.
778
+
779
+ Returns:
780
+ The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
781
+ """
782
+
783
+ def _convert_to_fp32(tensor):
784
+ return tensor.float()
785
+
786
+ def _is_fp16_bf16_tensor(tensor):
787
+ return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in (
788
+ torch.float16,
789
+ torch.bfloat16,
790
+ )
791
+
792
+ return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
793
+
794
+
795
+ class ConvertOutputsToFp32:
796
+ """
797
+ Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
798
+ precision will be converted back to FP32.
799
+
800
+ Args:
801
+ model_forward (`Callable`):
802
+ The function whose outputs we want to treat.
803
+
804
+ Returns:
805
+ The same function as `model_forward` but with converted outputs.
806
+ """
807
+
808
+ def __init__(self, model_forward):
809
+ self.model_forward = model_forward
810
+ update_wrapper(self, model_forward)
811
+
812
+ def __call__(self, *args, **kwargs):
813
+ return convert_to_fp32(self.model_forward(*args, **kwargs))
814
+
815
+ def __getstate__(self):
816
+ raise pickle.PicklingError(
817
+ "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
818
+ )
819
+
820
+
821
+ def convert_outputs_to_fp32(model_forward):
822
+ model_forward = ConvertOutputsToFp32(model_forward)
823
+
824
+ def forward(*args, **kwargs):
825
+ return model_forward(*args, **kwargs)
826
+
827
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
828
+ forward.__wrapped__ = model_forward
829
+
830
+ return forward
831
+
832
+
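A CPU-only sketch of what the wrapper does (editor's addition; the `forward` below is a stand-in for a real model forward): any fp16/bf16 tensors found in the output structure come back as fp32.

```python
import torch
from accelerate.utils.operations import convert_outputs_to_fp32

def forward(x):
    # Stand-in for a mixed-precision model forward returning bf16 tensors.
    return {"logits": x.to(torch.bfloat16), "hidden": (x.to(torch.bfloat16),)}

wrapped = convert_outputs_to_fp32(forward)
out = wrapped(torch.randn(2, 3))
print(out["logits"].dtype, out["hidden"][0].dtype)  # torch.float32 torch.float32
```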
833
+ def find_device(data):
834
+ """
835
+ Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
836
+
837
+ Args:
838
+ (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
839
+ """
840
+ if isinstance(data, Mapping):
841
+ for obj in data.values():
842
+ device = find_device(obj)
843
+ if device is not None:
844
+ return device
845
+ elif isinstance(data, (tuple, list)):
846
+ for obj in data:
847
+ device = find_device(obj)
848
+ if device is not None:
849
+ return device
850
+ elif isinstance(data, torch.Tensor):
851
+ return data.device
venv/lib/python3.10/site-packages/accelerate/utils/other.py ADDED
@@ -0,0 +1,366 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import collections
16
+ import os
17
+ import platform
18
+ import re
19
+ import socket
20
+ from contextlib import contextmanager
21
+ from functools import partial, reduce
22
+ from types import MethodType
23
+ from typing import OrderedDict
24
+
25
+ import torch
26
+ from packaging.version import Version
27
+ from safetensors.torch import save_file as safe_save_file
28
+
29
+ from ..commands.config.default import write_basic_config # noqa: F401
30
+ from ..logging import get_logger
31
+ from ..state import PartialState
32
+ from .constants import FSDP_PYTORCH_VERSION
33
+ from .dataclasses import DistributedType
34
+ from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available
35
+ from .modeling import id_tensor_storage
36
+ from .transformer_engine import convert_model
37
+ from .versions import is_torch_version
38
+
39
+
40
+ logger = get_logger(__name__)
41
+
42
+
43
+ if is_torch_xla_available():
44
+ import torch_xla.core.xla_model as xm
45
+
46
+
47
+ def is_compiled_module(module):
48
+ """
49
+ Check whether the module was compiled with torch.compile()
50
+ """
51
+ if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
52
+ return False
53
+ return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
54
+
55
+
56
+ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True, recursive: bool = False):
57
+ """
58
+ Extract a model from its distributed containers.
59
+
60
+ Args:
61
+ model (`torch.nn.Module`):
62
+ The model to extract.
63
+ keep_fp32_wrapper (`bool`, *optional*):
64
+ Whether to keep the mixed precision hooks (the FP32 output conversion wrapper) attached to the model.
65
+ recursive (`bool`, *optional*, defaults to `False`):
66
+ Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers
67
+ recursively, not just the top-level distributed containers.
68
+
69
+ Returns:
70
+ `torch.nn.Module`: The extracted model.
71
+ """
72
+ options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
73
+
74
+ is_compiled = is_compiled_module(model)
75
+ if is_compiled:
76
+ compiled_model = model
77
+ model = model._orig_mod
78
+
79
+ if is_deepspeed_available():
80
+ from deepspeed import DeepSpeedEngine
81
+
82
+ options += (DeepSpeedEngine,)
83
+
84
+ if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
85
+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
86
+
87
+ options += (FSDP,)
88
+
89
+ while isinstance(model, options):
90
+ model = model.module
91
+
92
+ if recursive:
93
+ # This is needed in cases such as using FSDPv2 on XLA
94
+ def _recursive_unwrap(module):
95
+ # Wrapped modules are standardly wrapped as `module`, similar to the cases earlier
96
+ # with DDP, DataParallel, DeepSpeed, and FSDP
97
+ if hasattr(module, "module"):
98
+ unwrapped_module = _recursive_unwrap(module.module)
99
+ else:
100
+ unwrapped_module = module
101
+ # Next unwrap child sublayers recursively
102
+ for name, child in unwrapped_module.named_children():
103
+ setattr(unwrapped_module, name, _recursive_unwrap(child))
104
+ return unwrapped_module
105
+
106
+ # Start with top-level
107
+ model = _recursive_unwrap(model)
108
+
109
+ if not keep_fp32_wrapper:
110
+ forward = model.forward
111
+ original_forward = model.__dict__.pop("_original_forward", None)
112
+ if original_forward is not None:
113
+ while hasattr(forward, "__wrapped__"):
114
+ forward = forward.__wrapped__
115
+ if forward == original_forward:
116
+ break
117
+ model.forward = MethodType(forward, model)
118
+ if getattr(model, "_converted_to_transformer_engine", False):
119
+ convert_model(model, to_transformer_engine=False)
120
+
121
+ if is_compiled:
122
+ compiled_model._orig_mod = model
123
+ model = compiled_model
124
+
125
+ return model
126
+
127
+
128
+ def wait_for_everyone():
129
+ """
130
+ Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
131
+
132
+ <Tip warning={true}>
133
+
134
+ Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
135
+
136
+ </Tip>
137
+ """
138
+ PartialState().wait_for_everyone()
139
+
140
+
141
+ def clean_state_dict_for_safetensors(state_dict: dict):
142
+ """
143
+ Cleans the state dictionary from a model and removes tensor aliasing if present.
144
+
145
+ Args:
146
+ state_dict (`dict`):
147
+ The state dictionary from a model
148
+ """
149
+ ptrs = collections.defaultdict(list)
150
+ # When bnb serialization is used, weights in state dict can be strings
151
+ for name, tensor in state_dict.items():
152
+ if not isinstance(tensor, str):
153
+ ptrs[id_tensor_storage(tensor)].append(name)
154
+
155
+ # These are all pointers of tensors with shared memory
156
+ shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
157
+ warn_names = set()
158
+ for names in shared_ptrs.values():
159
+ # When not all duplicates have been cleaned, we still remove those keys but put a clear warning.
160
+ # If the link between tensors was done at runtime then `from_pretrained` will not get
161
+ # the key back leading to random tensor. A proper warning will be shown
162
+ # during reload (if applicable), but since the file is not necessarily compatible with
163
+ # the config, better show a proper warning.
164
+ found_names = [name for name in names if name in state_dict]
165
+ warn_names.update(found_names[1:])
166
+ for name in found_names[1:]:
167
+ del state_dict[name]
168
+ if len(warn_names) > 0:
169
+ logger.warning(
170
+ f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
171
+ )
172
+ state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()}
173
+ return state_dict
174
+
175
+
176
+ def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
177
+ """
178
+ Save the data to disk. Use in place of `torch.save()`.
179
+
180
+ Args:
181
+ obj:
182
+ The data to save
183
+ f:
184
+ The file (or file-like object) to use to save the data
185
+ save_on_each_node (`bool`, *optional*, defaults to `False`):
186
+ Whether to only save on the global main process
187
+ safe_serialization (`bool`, *optional*, defaults to `False`):
188
+ Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`).
189
+ """
190
+ # When TorchXLA is enabled, it's necessary to transfer all data to the CPU before saving.
191
+ # Another issue arises with `id_tensor_storage`, which treats all XLA tensors as identical.
192
+ # If tensors remain on XLA, calling `clean_state_dict_for_safetensors` will result in only
193
+ # one XLA tensor remaining.
194
+ if PartialState().distributed_type == DistributedType.XLA:
195
+ obj = xm._maybe_convert_to_cpu(obj)
196
+ # Check if it's a model and remove duplicates
197
+ if safe_serialization:
198
+ save_func = partial(safe_save_file, metadata={"format": "pt"})
199
+ if isinstance(obj, OrderedDict):
200
+ obj = clean_state_dict_for_safetensors(obj)
201
+ else:
202
+ save_func = torch.save
203
+
204
+ if PartialState().is_main_process and not save_on_each_node:
205
+ save_func(obj, f)
206
+ elif PartialState().is_local_main_process and save_on_each_node:
207
+ save_func(obj, f)
208
+
209
+
210
+ @contextmanager
211
+ def clear_environment():
212
+ """
213
+ A context manager that will temporarily clear environment variables.
214
+
215
+ When this context exits, the previous environment variables will be back.
216
+
217
+ Example:
218
+
219
+ ```python
220
+ >>> import os
221
+ >>> from accelerate.utils import clear_environment
222
+
223
+ >>> os.environ["FOO"] = "bar"
224
+ >>> with clear_environment():
225
+ ... print(os.environ)
226
+ ... os.environ["FOO"] = "new_bar"
227
+ ... print(os.environ["FOO"])
228
+ {}
229
+ new_bar
230
+
231
+ >>> print(os.environ["FOO"])
232
+ bar
233
+ ```
234
+ """
235
+ _old_os_environ = os.environ.copy()
236
+ os.environ.clear()
237
+
238
+ try:
239
+ yield
240
+ finally:
241
+ os.environ.clear() # clear any added keys,
242
+ os.environ.update(_old_os_environ) # then restore previous environment
243
+
244
+
245
+ @contextmanager
246
+ def patch_environment(**kwargs):
247
+ """
248
+ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
249
+
250
+ Will convert the values in `kwargs` to strings and upper-case all the keys.
251
+
252
+ Example:
253
+
254
+ ```python
255
+ >>> import os
256
+ >>> from accelerate.utils import patch_environment
257
+
258
+ >>> with patch_environment(FOO="bar"):
259
+ ... print(os.environ["FOO"]) # prints "bar"
260
+ >>> print(os.environ["FOO"]) # raises KeyError
261
+ ```
262
+ """
263
+ existing_vars = {}
264
+ for key, value in kwargs.items():
265
+ key = key.upper()
266
+ if key in os.environ:
267
+ existing_vars[key] = os.environ[key]
268
+ os.environ[key] = str(value)
269
+
270
+ try:
271
+ yield
272
+ finally:
273
+ for key in kwargs:
274
+ key = key.upper()
275
+ if key in existing_vars:
276
+ # restore previous value
277
+ os.environ[key] = existing_vars[key]
278
+ else:
279
+ os.environ.pop(key, None)
280
+
281
+
282
+ def get_pretty_name(obj):
283
+ """
284
+ Gets a pretty name from `obj`.
285
+ """
286
+ if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
287
+ obj = getattr(obj, "__class__", obj)
288
+ if hasattr(obj, "__qualname__"):
289
+ return obj.__qualname__
290
+ if hasattr(obj, "__name__"):
291
+ return obj.__name__
292
+ return str(obj)
293
+
294
+
295
+ def merge_dicts(source, destination):
296
+ """
297
+ Recursively merges two dictionaries.
298
+
299
+ Args:
300
+ source (`dict`): The dictionary to merge into `destination`.
301
+ destination (`dict`): The dictionary to merge `source` into.
302
+ """
303
+ for key, value in source.items():
304
+ if isinstance(value, dict):
305
+ node = destination.setdefault(key, {})
306
+ merge_dicts(value, node)
307
+ else:
308
+ destination[key] = value
309
+
310
+ return destination
311
+
312
+
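Note the argument order: `source` is merged *into* `destination`, which is mutated and returned. An editor's sketch with hypothetical config values:

```python
from accelerate.utils.other import merge_dicts

defaults = {"optimizer": {"lr": 1e-3, "betas": (0.9, 0.999)}, "epochs": 3}
overrides = {"optimizer": {"lr": 5e-4}}
merged = merge_dicts(overrides, defaults)
print(merged["optimizer"])  # {'lr': 0.0005, 'betas': (0.9, 0.999)}
print(merged["epochs"])     # 3
```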
313
+ def is_port_in_use(port: int = None) -> bool:
314
+ """
315
+ Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been
316
+ run and need to see if the port is already in use.
317
+ """
318
+ if port is None:
319
+ port = 29500
320
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
321
+ return s.connect_ex(("localhost", port)) == 0
322
+
323
+
324
+ def convert_bytes(size):
325
+ "Converts `size` from bytes to the largest possible unit"
326
+ for x in ["bytes", "KB", "MB", "GB", "TB"]:
327
+ if size < 1024.0:
328
+ return f"{round(size, 2)} {x}"
329
+ size /= 1024.0
330
+
331
+ return f"{round(size, 2)} PB"
332
+
333
+
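A quick sanity check of `convert_bytes` (editor's addition):

```python
from accelerate.utils.other import convert_bytes

print(convert_bytes(512))           # 512 bytes
print(convert_bytes(1_500_000))     # 1.43 MB
print(convert_bytes(8 * 1024**3))   # 8.0 GB
```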
334
+ def check_os_kernel():
335
+ """Warns if the kernel version is below the recommended minimum on Linux."""
336
+ # see issue #1929
337
+ info = platform.uname()
338
+ system = info.system
339
+ if system != "Linux":
340
+ return
341
+
342
+ _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
343
+ min_version = "5.5.0"
344
+ if Version(version) < Version(min_version):
345
+ msg = (
346
+ f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can "
347
+ "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher."
348
+ )
349
+ logger.warning(msg, main_process_only=True)
350
+
351
+
352
+ def recursive_getattr(obj, attr: str):
353
+ """
354
+ Recursive `getattr`.
355
+
356
+ Args:
357
+ obj:
358
+ A class instance holding the attribute.
359
+ attr (`str`):
360
+ The attribute that is to be retrieved, e.g. 'attribute1.attribute2'.
361
+ """
362
+
363
+ def _getattr(obj, attr):
364
+ return getattr(obj, attr)
365
+
366
+ return reduce(_getattr, [obj] + attr.split("."))
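An editor's sketch of `recursive_getattr` with a toy module; the dotted path `"0.weight"` walks into the first layer of the `Sequential`:

```python
import torch
from accelerate.utils.other import recursive_getattr

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
weight = recursive_getattr(model, "0.weight")
print(weight.shape)  # torch.Size([4, 4])
```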
venv/lib/python3.10/site-packages/accelerate/utils/random.py ADDED
@@ -0,0 +1,122 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import random
16
+ from typing import List, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+
21
+ from ..state import AcceleratorState
22
+ from .constants import CUDA_DISTRIBUTED_TYPES
23
+ from .dataclasses import DistributedType, RNGType
24
+ from .imports import is_mlu_available, is_npu_available, is_torch_xla_available, is_xpu_available
25
+
26
+
27
+ if is_torch_xla_available():
28
+ import torch_xla.core.xla_model as xm
29
+
30
+
31
+ def set_seed(seed: int, device_specific: bool = False, deterministic: bool = False):
32
+ """
33
+ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
34
+
35
+ Args:
36
+ seed (`int`):
37
+ The seed to set.
38
+ device_specific (`bool`, *optional*, defaults to `False`):
39
+ Whether to vary the seed slightly on each device using the process index.
40
+ deterministic (`bool`, *optional*, defaults to `False`):
41
+ Whether to use deterministic algorithms where available. Can slow down training.
42
+ """
43
+ if device_specific:
44
+ seed += AcceleratorState().process_index
45
+ random.seed(seed)
46
+ np.random.seed(seed)
47
+ torch.manual_seed(seed)
48
+ if is_xpu_available():
49
+ torch.xpu.manual_seed_all(seed)
50
+ elif is_npu_available():
51
+ torch.npu.manual_seed_all(seed)
52
+ elif is_mlu_available():
53
+ torch.mlu.manual_seed_all(seed)
54
+ else:
55
+ torch.cuda.manual_seed_all(seed)
56
+ # ^^ safe to call this function even if cuda is not available
57
+ if is_torch_xla_available():
58
+ xm.set_rng_state(seed)
59
+
60
+ if deterministic:
61
+ torch.use_deterministic_algorithms(True)
62
+
63
+
64
+ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
65
+ # Get the proper rng state
66
+ if rng_type == RNGType.TORCH:
67
+ rng_state = torch.get_rng_state()
68
+ elif rng_type == RNGType.CUDA:
69
+ rng_state = torch.cuda.get_rng_state()
70
+ elif rng_type == RNGType.XLA:
71
+ assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable."
72
+ rng_state = torch.tensor(xm.get_rng_state())
73
+ elif rng_type == RNGType.NPU:
74
+ assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs."
75
+ rng_state = torch.npu.get_rng_state()
76
+ elif rng_type == RNGType.MLU:
77
+ assert is_mlu_available(), "Can't synchronize MLU seeds on an environment without MLUs."
78
+ rng_state = torch.mlu.get_rng_state()
79
+ elif rng_type == RNGType.XPU:
80
+ assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs."
81
+ rng_state = torch.xpu.get_rng_state()
82
+ elif rng_type == RNGType.GENERATOR:
83
+ assert generator is not None, "Need a generator to synchronize its seed."
84
+ rng_state = generator.get_state()
85
+
86
+ # Broadcast the rng state from device 0 to other devices
87
+ state = AcceleratorState()
88
+ if state.distributed_type == DistributedType.XLA:
89
+ rng_state = rng_state.to(xm.xla_device())
90
+ xm.collective_broadcast([rng_state])
91
+ xm.mark_step()
92
+ rng_state = rng_state.cpu()
93
+ elif (
94
+ state.distributed_type in CUDA_DISTRIBUTED_TYPES
95
+ or state.distributed_type == DistributedType.MULTI_MLU
96
+ or state.distributed_type == DistributedType.MULTI_NPU
97
+ or state.distributed_type == DistributedType.MULTI_XPU
98
+ ):
99
+ rng_state = rng_state.to(state.device)
100
+ torch.distributed.broadcast(rng_state, 0)
101
+ rng_state = rng_state.cpu()
102
+ elif state.distributed_type == DistributedType.MULTI_CPU:
103
+ torch.distributed.broadcast(rng_state, 0)
104
+
105
+ # Set the broadcast rng state
106
+ if rng_type == RNGType.TORCH:
107
+ torch.set_rng_state(rng_state)
108
+ elif rng_type == RNGType.CUDA:
109
+ torch.cuda.set_rng_state(rng_state)
110
+ elif rng_type == RNGType.NPU:
111
+ torch.npu.set_rng_state(rng_state)
112
+ elif rng_type == RNGType.XPU:
113
+ torch.xpu.set_rng_state(rng_state)
114
+ elif rng_type == RNGType.XLA:
115
+ xm.set_rng_state(rng_state.item())
116
+ elif rng_type == RNGType.GENERATOR:
117
+ generator.set_state(rng_state)
118
+
119
+
120
+ def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
121
+ for rng_type in rng_types:
122
+ synchronize_rng_state(RNGType(rng_type), generator=generator)
venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py ADDED
@@ -0,0 +1,51 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib.metadata
16
+ import subprocess
17
+ import sys
18
+
19
+
20
+ def install_xla(upgrade: bool = False):
21
+ """
22
+ Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory.
23
+
24
+ Args:
25
+ upgrade (`bool`, *optional*, defaults to `False`):
26
+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.
27
+
28
+ Example:
29
+
30
+ ```python
31
+ >>> from accelerate.utils import install_xla
32
+
33
+ >>> install_xla(upgrade=True)
34
+ ```
35
+ """
36
+ in_colab = False
37
+ if "IPython" in sys.modules:
38
+ in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
39
+
40
+ if in_colab:
41
+ if upgrade:
42
+ torch_install_cmd = ["pip", "install", "-U", "torch"]
43
+ subprocess.run(torch_install_cmd, check=True)
44
+ # get the current version of torch
45
+ torch_version = importlib.metadata.version("torch")
46
+ torch_version_trunc = torch_version[: torch_version.rindex(".")]
47
+ xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl"
48
+ xla_install_cmd = ["pip", "install", xla_wheel]
49
+ subprocess.run(xla_install_cmd, check=True)
50
+ else:
51
+ raise RuntimeError("`install_xla` utility works only on google colab.")
venv/lib/python3.10/site-packages/accelerate/utils/versions.py ADDED
@@ -0,0 +1,56 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib.metadata
16
+ from typing import Union
17
+
18
+ from packaging.version import Version, parse
19
+
20
+ from .constants import STR_OPERATION_TO_FUNC
21
+
22
+
23
+ torch_version = parse(importlib.metadata.version("torch"))
24
+
25
+
26
+ def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
27
+ """
28
+ Compares a library version to some requirement using a given operation.
29
+
30
+ Args:
31
+ library_or_version (`str` or `packaging.version.Version`):
32
+ A library name or a version to check.
33
+ operation (`str`):
34
+ A string representation of an operator, such as `">"` or `"<="`.
35
+ requirement_version (`str`):
36
+ The version to compare the library version against
37
+ """
38
+ if operation not in STR_OPERATION_TO_FUNC.keys():
39
+ raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
40
+ operation = STR_OPERATION_TO_FUNC[operation]
41
+ if isinstance(library_or_version, str):
42
+ library_or_version = parse(importlib.metadata.version(library_or_version))
43
+ return operation(library_or_version, parse(requirement_version))
44
+
45
+
46
+ def is_torch_version(operation: str, version: str):
47
+ """
48
+ Compares the current PyTorch version to a given reference with an operation.
49
+
50
+ Args:
51
+ operation (`str`):
52
+ A string representation of an operator, such as `">"` or `"<="`
53
+ version (`str`):
54
+ A string version of PyTorch
55
+ """
56
+ return compare_versions(torch_version, operation, version)
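A short sketch of both helpers (editor's addition): `is_torch_version` checks the installed `torch`, while `compare_versions` also accepts any installed library name.

```python
from accelerate.utils.versions import compare_versions, is_torch_version

if is_torch_version(">=", "2.0.0"):
    print("running on PyTorch 2.x")
print(compare_versions("numpy", ">=", "1.21"))  # True/False depending on the environment
```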
venv/lib/python3.10/site-packages/more_itertools/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ """More routines for operating on iterables, beyond itertools"""
2
+
3
+ from .more import * # noqa
4
+ from .recipes import * # noqa
5
+
6
+ __version__ = '10.2.0'
venv/lib/python3.10/site-packages/more_itertools/__init__.pyi ADDED
@@ -0,0 +1,2 @@
1
+ from .more import *
2
+ from .recipes import *
venv/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (324 Bytes).
 
venv/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc ADDED
Binary file (133 kB).
 
venv/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc ADDED
Binary file (28.4 kB).
 
venv/lib/python3.10/site-packages/more_itertools/more.py ADDED
The diff for this file is too large to render.
 
venv/lib/python3.10/site-packages/more_itertools/more.pyi ADDED
@@ -0,0 +1,695 @@
1
+ """Stubs for more_itertools.more"""
2
+ from __future__ import annotations
3
+
4
+ from types import TracebackType
5
+ from typing import (
6
+ Any,
7
+ Callable,
8
+ Container,
9
+ ContextManager,
10
+ Generic,
11
+ Hashable,
12
+ Iterable,
13
+ Iterator,
14
+ overload,
15
+ Reversible,
16
+ Sequence,
17
+ Sized,
18
+ Type,
19
+ TypeVar,
20
+ type_check_only,
21
+ )
22
+ from typing_extensions import Protocol
23
+
24
+ # Type and type variable definitions
25
+ _T = TypeVar('_T')
26
+ _T1 = TypeVar('_T1')
27
+ _T2 = TypeVar('_T2')
28
+ _U = TypeVar('_U')
29
+ _V = TypeVar('_V')
30
+ _W = TypeVar('_W')
31
+ _T_co = TypeVar('_T_co', covariant=True)
32
+ _GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[Any]])
33
+ _Raisable = BaseException | Type[BaseException]
34
+
35
+ @type_check_only
36
+ class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ...
37
+
38
+ @type_check_only
39
+ class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ...
40
+
41
+ @type_check_only
42
+ class _SupportsSlicing(Protocol[_T_co]):
43
+ def __getitem__(self, __k: slice) -> _T_co: ...
44
+
45
+ def chunked(
46
+ iterable: Iterable[_T], n: int | None, strict: bool = ...
47
+ ) -> Iterator[list[_T]]: ...
48
+ @overload
49
+ def first(iterable: Iterable[_T]) -> _T: ...
50
+ @overload
51
+ def first(iterable: Iterable[_T], default: _U) -> _T | _U: ...
52
+ @overload
53
+ def last(iterable: Iterable[_T]) -> _T: ...
54
+ @overload
55
+ def last(iterable: Iterable[_T], default: _U) -> _T | _U: ...
56
+ @overload
57
+ def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ...
58
+ @overload
59
+ def nth_or_last(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
60
+
61
+ class peekable(Generic[_T], Iterator[_T]):
62
+ def __init__(self, iterable: Iterable[_T]) -> None: ...
63
+ def __iter__(self) -> peekable[_T]: ...
64
+ def __bool__(self) -> bool: ...
65
+ @overload
66
+ def peek(self) -> _T: ...
67
+ @overload
68
+ def peek(self, default: _U) -> _T | _U: ...
69
+ def prepend(self, *items: _T) -> None: ...
70
+ def __next__(self) -> _T: ...
71
+ @overload
72
+ def __getitem__(self, index: int) -> _T: ...
73
+ @overload
74
+ def __getitem__(self, index: slice) -> list[_T]: ...
75
+
76
+ def consumer(func: _GenFn) -> _GenFn: ...
77
+ def ilen(iterable: Iterable[_T]) -> int: ...
78
+ def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ...
79
+ def with_iter(
80
+ context_manager: ContextManager[Iterable[_T]],
81
+ ) -> Iterator[_T]: ...
82
+ def one(
83
+ iterable: Iterable[_T],
84
+ too_short: _Raisable | None = ...,
85
+ too_long: _Raisable | None = ...,
86
+ ) -> _T: ...
87
+ def raise_(exception: _Raisable, *args: Any) -> None: ...
88
+ def strictly_n(
89
+ iterable: Iterable[_T],
90
+ n: int,
91
+ too_short: _GenFn | None = ...,
92
+ too_long: _GenFn | None = ...,
93
+ ) -> list[_T]: ...
94
+ def distinct_permutations(
95
+ iterable: Iterable[_T], r: int | None = ...
96
+ ) -> Iterator[tuple[_T, ...]]: ...
97
+ def intersperse(
98
+ e: _U, iterable: Iterable[_T], n: int = ...
99
+ ) -> Iterator[_T | _U]: ...
100
+ def unique_to_each(*iterables: Iterable[_T]) -> list[list[_T]]: ...
101
+ @overload
102
+ def windowed(
103
+ seq: Iterable[_T], n: int, *, step: int = ...
104
+ ) -> Iterator[tuple[_T | None, ...]]: ...
105
+ @overload
106
+ def windowed(
107
+ seq: Iterable[_T], n: int, fillvalue: _U, step: int = ...
108
+ ) -> Iterator[tuple[_T | _U, ...]]: ...
109
+ def substrings(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
110
+ def substrings_indexes(
111
+ seq: Sequence[_T], reverse: bool = ...
112
+ ) -> Iterator[tuple[Sequence[_T], int, int]]: ...
113
+
114
+ class bucket(Generic[_T, _U], Container[_U]):
115
+ def __init__(
116
+ self,
117
+ iterable: Iterable[_T],
118
+ key: Callable[[_T], _U],
119
+ validator: Callable[[_U], object] | None = ...,
120
+ ) -> None: ...
121
+ def __contains__(self, value: object) -> bool: ...
122
+ def __iter__(self) -> Iterator[_U]: ...
123
+ def __getitem__(self, value: object) -> Iterator[_T]: ...
124
+
125
+ def spy(
126
+ iterable: Iterable[_T], n: int = ...
127
+ ) -> tuple[list[_T], Iterator[_T]]: ...
128
+ def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ...
129
+ def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ...
130
+ def interleave_evenly(
131
+ iterables: list[Iterable[_T]], lengths: list[int] | None = ...
132
+ ) -> Iterator[_T]: ...
133
+ def collapse(
134
+ iterable: Iterable[Any],
135
+ base_type: type | None = ...,
136
+ levels: int | None = ...,
137
+ ) -> Iterator[Any]: ...
138
+ @overload
139
+ def side_effect(
140
+ func: Callable[[_T], object],
141
+ iterable: Iterable[_T],
142
+ chunk_size: None = ...,
143
+ before: Callable[[], object] | None = ...,
144
+ after: Callable[[], object] | None = ...,
145
+ ) -> Iterator[_T]: ...
146
+ @overload
147
+ def side_effect(
148
+ func: Callable[[list[_T]], object],
149
+ iterable: Iterable[_T],
150
+ chunk_size: int,
151
+ before: Callable[[], object] | None = ...,
152
+ after: Callable[[], object] | None = ...,
153
+ ) -> Iterator[_T]: ...
154
+ def sliced(
155
+ seq: _SupportsSlicing[_T], n: int, strict: bool = ...
156
+ ) -> Iterator[_T]: ...
157
+ def split_at(
158
+ iterable: Iterable[_T],
159
+ pred: Callable[[_T], object],
160
+ maxsplit: int = ...,
161
+ keep_separator: bool = ...,
162
+ ) -> Iterator[list[_T]]: ...
163
+ def split_before(
164
+ iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
165
+ ) -> Iterator[list[_T]]: ...
166
+ def split_after(
167
+ iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
168
+ ) -> Iterator[list[_T]]: ...
169
+ def split_when(
170
+ iterable: Iterable[_T],
171
+ pred: Callable[[_T, _T], object],
172
+ maxsplit: int = ...,
173
+ ) -> Iterator[list[_T]]: ...
174
+ def split_into(
175
+ iterable: Iterable[_T], sizes: Iterable[int | None]
176
+ ) -> Iterator[list[_T]]: ...
177
+ @overload
178
+ def padded(
179
+ iterable: Iterable[_T],
180
+ *,
181
+ n: int | None = ...,
182
+ next_multiple: bool = ...,
183
+ ) -> Iterator[_T | None]: ...
184
+ @overload
185
+ def padded(
186
+ iterable: Iterable[_T],
187
+ fillvalue: _U,
188
+ n: int | None = ...,
189
+ next_multiple: bool = ...,
190
+ ) -> Iterator[_T | _U]: ...
191
+ @overload
192
+ def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ...
193
+ @overload
194
+ def repeat_last(iterable: Iterable[_T], default: _U) -> Iterator[_T | _U]: ...
195
+ def distribute(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
196
+ @overload
197
+ def stagger(
198
+ iterable: Iterable[_T],
199
+ offsets: _SizedIterable[int] = ...,
200
+ longest: bool = ...,
201
+ ) -> Iterator[tuple[_T | None, ...]]: ...
202
+ @overload
203
+ def stagger(
204
+ iterable: Iterable[_T],
205
+ offsets: _SizedIterable[int] = ...,
206
+ longest: bool = ...,
207
+ fillvalue: _U = ...,
208
+ ) -> Iterator[tuple[_T | _U, ...]]: ...
209
+
210
+ class UnequalIterablesError(ValueError):
211
+ def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ...
212
+
213
+ @overload
214
+ def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ...
215
+ @overload
216
+ def zip_equal(
217
+ __iter1: Iterable[_T1], __iter2: Iterable[_T2]
218
+ ) -> Iterator[tuple[_T1, _T2]]: ...
219
+ @overload
220
+ def zip_equal(
221
+ __iter1: Iterable[_T],
222
+ __iter2: Iterable[_T],
223
+ __iter3: Iterable[_T],
224
+ *iterables: Iterable[_T],
225
+ ) -> Iterator[tuple[_T, ...]]: ...
226
+ @overload
227
+ def zip_offset(
228
+ __iter1: Iterable[_T1],
229
+ *,
230
+ offsets: _SizedIterable[int],
231
+ longest: bool = ...,
232
+ fillvalue: None = None,
233
+ ) -> Iterator[tuple[_T1 | None]]: ...
234
+ @overload
235
+ def zip_offset(
236
+ __iter1: Iterable[_T1],
237
+ __iter2: Iterable[_T2],
238
+ *,
239
+ offsets: _SizedIterable[int],
240
+ longest: bool = ...,
241
+ fillvalue: None = None,
242
+ ) -> Iterator[tuple[_T1 | None, _T2 | None]]: ...
243
+ @overload
244
+ def zip_offset(
245
+ __iter1: Iterable[_T],
246
+ __iter2: Iterable[_T],
247
+ __iter3: Iterable[_T],
248
+ *iterables: Iterable[_T],
249
+ offsets: _SizedIterable[int],
250
+ longest: bool = ...,
251
+ fillvalue: None = None,
252
+ ) -> Iterator[tuple[_T | None, ...]]: ...
253
+ @overload
254
+ def zip_offset(
255
+ __iter1: Iterable[_T1],
256
+ *,
257
+ offsets: _SizedIterable[int],
258
+ longest: bool = ...,
259
+ fillvalue: _U,
260
+ ) -> Iterator[tuple[_T1 | _U]]: ...
261
+ @overload
262
+ def zip_offset(
263
+ __iter1: Iterable[_T1],
264
+ __iter2: Iterable[_T2],
265
+ *,
266
+ offsets: _SizedIterable[int],
267
+ longest: bool = ...,
268
+ fillvalue: _U,
269
+ ) -> Iterator[tuple[_T1 | _U, _T2 | _U]]: ...
270
+ @overload
271
+ def zip_offset(
272
+ __iter1: Iterable[_T],
273
+ __iter2: Iterable[_T],
274
+ __iter3: Iterable[_T],
275
+ *iterables: Iterable[_T],
276
+ offsets: _SizedIterable[int],
277
+ longest: bool = ...,
278
+ fillvalue: _U,
279
+ ) -> Iterator[tuple[_T | _U, ...]]: ...
280
+ def sort_together(
281
+ iterables: Iterable[Iterable[_T]],
282
+ key_list: Iterable[int] = ...,
283
+ key: Callable[..., Any] | None = ...,
284
+ reverse: bool = ...,
285
+ ) -> list[tuple[_T, ...]]: ...
286
+ def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ...
287
+ def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
288
+ def always_iterable(
289
+ obj: object,
290
+ base_type: type | tuple[type | tuple[Any, ...], ...] | None = ...,
291
+ ) -> Iterator[Any]: ...
292
+ def adjacent(
293
+ predicate: Callable[[_T], bool],
294
+ iterable: Iterable[_T],
295
+ distance: int = ...,
296
+ ) -> Iterator[tuple[bool, _T]]: ...
297
+ @overload
298
+ def groupby_transform(
299
+ iterable: Iterable[_T],
300
+ keyfunc: None = None,
301
+ valuefunc: None = None,
302
+ reducefunc: None = None,
303
+ ) -> Iterator[tuple[_T, Iterator[_T]]]: ...
304
+ @overload
305
+ def groupby_transform(
306
+ iterable: Iterable[_T],
307
+ keyfunc: Callable[[_T], _U],
308
+ valuefunc: None,
309
+ reducefunc: None,
310
+ ) -> Iterator[tuple[_U, Iterator[_T]]]: ...
311
+ @overload
312
+ def groupby_transform(
313
+ iterable: Iterable[_T],
314
+ keyfunc: None,
315
+ valuefunc: Callable[[_T], _V],
316
+ reducefunc: None,
317
+ ) -> Iterable[tuple[_T, Iterable[_V]]]: ...
318
+ @overload
319
+ def groupby_transform(
320
+ iterable: Iterable[_T],
321
+ keyfunc: Callable[[_T], _U],
322
+ valuefunc: Callable[[_T], _V],
323
+ reducefunc: None,
324
+ ) -> Iterable[tuple[_U, Iterator[_V]]]: ...
325
+ @overload
326
+ def groupby_transform(
327
+ iterable: Iterable[_T],
328
+ keyfunc: None,
329
+ valuefunc: None,
330
+ reducefunc: Callable[[Iterator[_T]], _W],
331
+ ) -> Iterable[tuple[_T, _W]]: ...
332
+ @overload
333
+ def groupby_transform(
334
+ iterable: Iterable[_T],
335
+ keyfunc: Callable[[_T], _U],
336
+ valuefunc: None,
337
+ reducefunc: Callable[[Iterator[_T]], _W],
338
+ ) -> Iterable[tuple[_U, _W]]: ...
339
+ @overload
340
+ def groupby_transform(
341
+ iterable: Iterable[_T],
342
+ keyfunc: None,
343
+ valuefunc: Callable[[_T], _V],
344
+ reducefunc: Callable[[Iterable[_V]], _W],
345
+ ) -> Iterable[tuple[_T, _W]]: ...
346
+ @overload
347
+ def groupby_transform(
348
+ iterable: Iterable[_T],
349
+ keyfunc: Callable[[_T], _U],
350
+ valuefunc: Callable[[_T], _V],
351
+ reducefunc: Callable[[Iterable[_V]], _W],
352
+ ) -> Iterable[tuple[_U, _W]]: ...
353
+
354
+ class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
355
+ @overload
356
+ def __init__(self, __stop: _T) -> None: ...
357
+ @overload
358
+ def __init__(self, __start: _T, __stop: _T) -> None: ...
359
+ @overload
360
+ def __init__(self, __start: _T, __stop: _T, __step: _U) -> None: ...
361
+ def __bool__(self) -> bool: ...
362
+ def __contains__(self, elem: object) -> bool: ...
363
+ def __eq__(self, other: object) -> bool: ...
364
+ @overload
365
+ def __getitem__(self, key: int) -> _T: ...
366
+ @overload
367
+ def __getitem__(self, key: slice) -> numeric_range[_T, _U]: ...
368
+ def __hash__(self) -> int: ...
369
+ def __iter__(self) -> Iterator[_T]: ...
370
+ def __len__(self) -> int: ...
371
+ def __reduce__(
372
+ self,
373
+ ) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ...
374
+ def __repr__(self) -> str: ...
375
+ def __reversed__(self) -> Iterator[_T]: ...
376
+ def count(self, value: _T) -> int: ...
377
+ def index(self, value: _T) -> int: ... # type: ignore
378
+
379
+ def count_cycle(
380
+ iterable: Iterable[_T], n: int | None = ...
381
+ ) -> Iterable[tuple[int, _T]]: ...
382
+ def mark_ends(
383
+ iterable: Iterable[_T],
384
+ ) -> Iterable[tuple[bool, bool, _T]]: ...
385
+ def locate(
386
+ iterable: Iterable[_T],
387
+ pred: Callable[..., Any] = ...,
388
+ window_size: int | None = ...,
389
+ ) -> Iterator[int]: ...
390
+ def lstrip(
391
+ iterable: Iterable[_T], pred: Callable[[_T], object]
392
+ ) -> Iterator[_T]: ...
393
+ def rstrip(
394
+ iterable: Iterable[_T], pred: Callable[[_T], object]
395
+ ) -> Iterator[_T]: ...
396
+ def strip(
397
+ iterable: Iterable[_T], pred: Callable[[_T], object]
398
+ ) -> Iterator[_T]: ...
399
+
400
+ class islice_extended(Generic[_T], Iterator[_T]):
401
+ def __init__(self, iterable: Iterable[_T], *args: int | None) -> None: ...
402
+ def __iter__(self) -> islice_extended[_T]: ...
403
+ def __next__(self) -> _T: ...
404
+ def __getitem__(self, index: slice) -> islice_extended[_T]: ...
405
+
406
+ def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ...
407
+ def consecutive_groups(
408
+ iterable: Iterable[_T], ordering: Callable[[_T], int] = ...
409
+ ) -> Iterator[Iterator[_T]]: ...
410
+ @overload
411
+ def difference(
412
+ iterable: Iterable[_T],
413
+ func: Callable[[_T, _T], _U] = ...,
414
+ *,
415
+ initial: None = ...,
416
+ ) -> Iterator[_T | _U]: ...
417
+ @overload
418
+ def difference(
419
+ iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U
420
+ ) -> Iterator[_U]: ...
421
+
422
+ class SequenceView(Generic[_T], Sequence[_T]):
423
+ def __init__(self, target: Sequence[_T]) -> None: ...
424
+ @overload
425
+ def __getitem__(self, index: int) -> _T: ...
426
+ @overload
427
+ def __getitem__(self, index: slice) -> Sequence[_T]: ...
428
+ def __len__(self) -> int: ...
429
+
430
+ class seekable(Generic[_T], Iterator[_T]):
431
+ def __init__(
432
+ self, iterable: Iterable[_T], maxlen: int | None = ...
433
+ ) -> None: ...
434
+ def __iter__(self) -> seekable[_T]: ...
435
+ def __next__(self) -> _T: ...
436
+ def __bool__(self) -> bool: ...
437
+ @overload
438
+ def peek(self) -> _T: ...
439
+ @overload
440
+ def peek(self, default: _U) -> _T | _U: ...
441
+ def elements(self) -> SequenceView[_T]: ...
442
+ def seek(self, index: int) -> None: ...
443
+ def relative_seek(self, count: int) -> None: ...
444
+
445
+ class run_length:
446
+ @staticmethod
447
+ def encode(iterable: Iterable[_T]) -> Iterator[tuple[_T, int]]: ...
448
+ @staticmethod
449
+ def decode(iterable: Iterable[tuple[_T, int]]) -> Iterator[_T]: ...
450
+
451
+ def exactly_n(
452
+ iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ...
453
+ ) -> bool: ...
454
+ def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ...
455
+ def make_decorator(
456
+ wrapping_func: Callable[..., _U], result_index: int = ...
457
+ ) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ...
458
+ @overload
459
+ def map_reduce(
460
+ iterable: Iterable[_T],
461
+ keyfunc: Callable[[_T], _U],
462
+ valuefunc: None = ...,
463
+ reducefunc: None = ...,
464
+ ) -> dict[_U, list[_T]]: ...
465
+ @overload
466
+ def map_reduce(
467
+ iterable: Iterable[_T],
468
+ keyfunc: Callable[[_T], _U],
469
+ valuefunc: Callable[[_T], _V],
470
+ reducefunc: None = ...,
471
+ ) -> dict[_U, list[_V]]: ...
472
+ @overload
473
+ def map_reduce(
474
+ iterable: Iterable[_T],
475
+ keyfunc: Callable[[_T], _U],
476
+ valuefunc: None = ...,
477
+ reducefunc: Callable[[list[_T]], _W] = ...,
478
+ ) -> dict[_U, _W]: ...
479
+ @overload
480
+ def map_reduce(
481
+ iterable: Iterable[_T],
482
+ keyfunc: Callable[[_T], _U],
483
+ valuefunc: Callable[[_T], _V],
484
+ reducefunc: Callable[[list[_V]], _W],
485
+ ) -> dict[_U, _W]: ...
486
+ def rlocate(
487
+ iterable: Iterable[_T],
488
+ pred: Callable[..., object] = ...,
489
+ window_size: int | None = ...,
490
+ ) -> Iterator[int]: ...
491
+ def replace(
492
+ iterable: Iterable[_T],
493
+ pred: Callable[..., object],
494
+ substitutes: Iterable[_U],
495
+ count: int | None = ...,
496
+ window_size: int = ...,
497
+ ) -> Iterator[_T | _U]: ...
498
+ def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ...
499
+ def set_partitions(
500
+ iterable: Iterable[_T], k: int | None = ...
501
+ ) -> Iterator[list[list[_T]]]: ...
502
+
503
+ class time_limited(Generic[_T], Iterator[_T]):
504
+ def __init__(
505
+ self, limit_seconds: float, iterable: Iterable[_T]
506
+ ) -> None: ...
507
+ def __iter__(self) -> islice_extended[_T]: ...
508
+ def __next__(self) -> _T: ...
509
+
510
+ @overload
511
+ def only(
512
+ iterable: Iterable[_T], *, too_long: _Raisable | None = ...
513
+ ) -> _T | None: ...
514
+ @overload
515
+ def only(
516
+ iterable: Iterable[_T], default: _U, too_long: _Raisable | None = ...
517
+ ) -> _T | _U: ...
518
+ def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ...
519
+ def distinct_combinations(
520
+ iterable: Iterable[_T], r: int
521
+ ) -> Iterator[tuple[_T, ...]]: ...
522
+ def filter_except(
523
+ validator: Callable[[Any], object],
524
+ iterable: Iterable[_T],
525
+ *exceptions: Type[BaseException],
526
+ ) -> Iterator[_T]: ...
527
+ def map_except(
528
+ function: Callable[[Any], _U],
529
+ iterable: Iterable[_T],
530
+ *exceptions: Type[BaseException],
531
+ ) -> Iterator[_U]: ...
532
+ def map_if(
533
+ iterable: Iterable[Any],
534
+ pred: Callable[[Any], bool],
535
+ func: Callable[[Any], Any],
536
+ func_else: Callable[[Any], Any] | None = ...,
537
+ ) -> Iterator[Any]: ...
538
+ def sample(
539
+ iterable: Iterable[_T],
540
+ k: int,
541
+ weights: Iterable[float] | None = ...,
542
+ ) -> list[_T]: ...
543
+ def is_sorted(
544
+ iterable: Iterable[_T],
545
+ key: Callable[[_T], _U] | None = ...,
546
+ reverse: bool = False,
547
+ strict: bool = False,
548
+ ) -> bool: ...
549
+
550
+ class AbortThread(BaseException):
551
+ pass
552
+
553
+ class callback_iter(Generic[_T], Iterator[_T]):
554
+ def __init__(
555
+ self,
556
+ func: Callable[..., Any],
557
+ callback_kwd: str = ...,
558
+ wait_seconds: float = ...,
559
+ ) -> None: ...
560
+ def __enter__(self) -> callback_iter[_T]: ...
561
+ def __exit__(
562
+ self,
563
+ exc_type: Type[BaseException] | None,
564
+ exc_value: BaseException | None,
565
+ traceback: TracebackType | None,
566
+ ) -> bool | None: ...
567
+ def __iter__(self) -> callback_iter[_T]: ...
568
+ def __next__(self) -> _T: ...
569
+ def _reader(self) -> Iterator[_T]: ...
570
+ @property
571
+ def done(self) -> bool: ...
572
+ @property
573
+ def result(self) -> Any: ...
574
+
575
+ def windowed_complete(
576
+ iterable: Iterable[_T], n: int
577
+ ) -> Iterator[tuple[_T, ...]]: ...
578
+ def all_unique(
579
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
580
+ ) -> bool: ...
581
+ def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ...
582
+ def nth_combination_with_replacement(
583
+ iterable: Iterable[_T], r: int, index: int
584
+ ) -> tuple[_T, ...]: ...
585
+ def nth_permutation(
586
+ iterable: Iterable[_T], r: int, index: int
587
+ ) -> tuple[_T, ...]: ...
588
+ def value_chain(*args: _T | Iterable[_T]) -> Iterable[_T]: ...
589
+ def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ...
590
+ def combination_index(
591
+ element: Iterable[_T], iterable: Iterable[_T]
592
+ ) -> int: ...
593
+ def combination_with_replacement_index(
594
+ element: Iterable[_T], iterable: Iterable[_T]
595
+ ) -> int: ...
596
+ def permutation_index(
597
+ element: Iterable[_T], iterable: Iterable[_T]
598
+ ) -> int: ...
599
+ def repeat_each(iterable: Iterable[_T], n: int = ...) -> Iterator[_T]: ...
600
+
601
+ class countable(Generic[_T], Iterator[_T]):
602
+ def __init__(self, iterable: Iterable[_T]) -> None: ...
603
+ def __iter__(self) -> countable[_T]: ...
604
+ def __next__(self) -> _T: ...
605
+
606
+ def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ...
607
+ def zip_broadcast(
608
+ *objects: _T | Iterable[_T],
609
+ scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ...,
610
+ strict: bool = ...,
611
+ ) -> Iterable[tuple[_T, ...]]: ...
612
+ def unique_in_window(
613
+ iterable: Iterable[_T], n: int, key: Callable[[_T], _U] | None = ...
614
+ ) -> Iterator[_T]: ...
615
+ def duplicates_everseen(
616
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
617
+ ) -> Iterator[_T]: ...
618
+ def duplicates_justseen(
619
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
620
+ ) -> Iterator[_T]: ...
621
+ def classify_unique(
622
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
623
+ ) -> Iterator[tuple[_T, bool, bool]]: ...
624
+
625
+ class _SupportsLessThan(Protocol):
626
+ def __lt__(self, __other: Any) -> bool: ...
627
+
628
+ _SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan)
629
+
630
+ @overload
631
+ def minmax(
632
+ iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None
633
+ ) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
634
+ @overload
635
+ def minmax(
636
+ iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan]
637
+ ) -> tuple[_T, _T]: ...
638
+ @overload
639
+ def minmax(
640
+ iterable_or_value: Iterable[_SupportsLessThanT],
641
+ *,
642
+ key: None = None,
643
+ default: _U,
644
+ ) -> _U | tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
645
+ @overload
646
+ def minmax(
647
+ iterable_or_value: Iterable[_T],
648
+ *,
649
+ key: Callable[[_T], _SupportsLessThan],
650
+ default: _U,
651
+ ) -> _U | tuple[_T, _T]: ...
652
+ @overload
653
+ def minmax(
654
+ iterable_or_value: _SupportsLessThanT,
655
+ __other: _SupportsLessThanT,
656
+ *others: _SupportsLessThanT,
657
+ ) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
658
+ @overload
659
+ def minmax(
660
+ iterable_or_value: _T,
661
+ __other: _T,
662
+ *others: _T,
663
+ key: Callable[[_T], _SupportsLessThan],
664
+ ) -> tuple[_T, _T]: ...
665
+ def longest_common_prefix(
666
+ iterables: Iterable[Iterable[_T]],
667
+ ) -> Iterator[_T]: ...
668
+ def iequals(*iterables: Iterable[Any]) -> bool: ...
669
+ def constrained_batches(
670
+ iterable: Iterable[_T],
671
+ max_size: int,
672
+ max_count: int | None = ...,
673
+ get_len: Callable[[_T], object] = ...,
674
+ strict: bool = ...,
675
+ ) -> Iterator[tuple[_T]]: ...
676
+ def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
677
+ def partial_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
678
+ def takewhile_inclusive(
679
+ predicate: Callable[[_T], bool], iterable: Iterable[_T]
680
+ ) -> Iterator[_T]: ...
681
+ def outer_product(
682
+ func: Callable[[_T, _U], _V],
683
+ xs: Iterable[_T],
684
+ ys: Iterable[_U],
685
+ *args: Any,
686
+ **kwargs: Any,
687
+ ) -> Iterator[tuple[_V, ...]]: ...
688
+ def iter_suppress(
689
+ iterable: Iterable[_T],
690
+ *exceptions: Type[BaseException],
691
+ ) -> Iterator[_T]: ...
692
+ def filter_map(
693
+ func: Callable[[_T], _V | None],
694
+ iterable: Iterable[_T],
695
+ ) -> Iterator[_V]: ...
venv/lib/python3.10/site-packages/more_itertools/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/more_itertools/recipes.py ADDED
@@ -0,0 +1,1012 @@
1
+ """Imported from the recipes section of the itertools documentation.
2
+
3
+ All functions taken from the recipes section of the itertools library docs
4
+ [1]_.
5
+ Some backward-compatible usability improvements have been made.
6
+
7
+ .. [1] http://docs.python.org/library/itertools.html#recipes
8
+
9
+ """
10
+ import math
11
+ import operator
12
+
13
+ from collections import deque
14
+ from collections.abc import Sized
15
+ from functools import partial, reduce
16
+ from itertools import (
17
+ chain,
18
+ combinations,
19
+ compress,
20
+ count,
21
+ cycle,
22
+ groupby,
23
+ islice,
24
+ product,
25
+ repeat,
26
+ starmap,
27
+ tee,
28
+ zip_longest,
29
+ )
30
+ from random import randrange, sample, choice
31
+ from sys import hexversion
32
+
33
+ __all__ = [
34
+ 'all_equal',
35
+ 'batched',
36
+ 'before_and_after',
37
+ 'consume',
38
+ 'convolve',
39
+ 'dotproduct',
40
+ 'first_true',
41
+ 'factor',
42
+ 'flatten',
43
+ 'grouper',
44
+ 'iter_except',
45
+ 'iter_index',
46
+ 'matmul',
47
+ 'ncycles',
48
+ 'nth',
49
+ 'nth_combination',
50
+ 'padnone',
51
+ 'pad_none',
52
+ 'pairwise',
53
+ 'partition',
54
+ 'polynomial_eval',
55
+ 'polynomial_from_roots',
56
+ 'polynomial_derivative',
57
+ 'powerset',
58
+ 'prepend',
59
+ 'quantify',
60
+ 'reshape',
61
+ 'random_combination_with_replacement',
62
+ 'random_combination',
63
+ 'random_permutation',
64
+ 'random_product',
65
+ 'repeatfunc',
66
+ 'roundrobin',
67
+ 'sieve',
68
+ 'sliding_window',
69
+ 'subslices',
70
+ 'sum_of_squares',
71
+ 'tabulate',
72
+ 'tail',
73
+ 'take',
74
+ 'totient',
75
+ 'transpose',
76
+ 'triplewise',
77
+ 'unique_everseen',
78
+ 'unique_justseen',
79
+ ]
80
+
81
+ _marker = object()
82
+
83
+
84
+ # zip with strict is available for Python 3.10+
85
+ try:
86
+ zip(strict=True)
87
+ except TypeError:
88
+ _zip_strict = zip
89
+ else:
90
+ _zip_strict = partial(zip, strict=True)
91
+
92
+ # math.sumprod is available for Python 3.12+
93
+ _sumprod = getattr(math, 'sumprod', lambda x, y: dotproduct(x, y))
94
+
95
+
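The probe above selects a strict `zip` when the interpreter provides one. A small sketch of the behavioral difference the shim papers over (the strict branch needs Python 3.10+):

import sys

# Plain zip silently truncates to the shortest input.
print(list(zip('abc', 'xy')))            # [('a', 'x'), ('b', 'y')]

if sys.version_info >= (3, 10):
    try:
        list(zip('abc', 'xy', strict=True))
    except ValueError as exc:
        # The strict variant flags the length mismatch instead.
        print('mismatch detected:', exc)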
96
+ def take(n, iterable):
97
+ """Return first *n* items of the iterable as a list.
98
+
99
+ >>> take(3, range(10))
100
+ [0, 1, 2]
101
+
102
+ If there are fewer than *n* items in the iterable, all of them are
103
+ returned.
104
+
105
+ >>> take(10, range(3))
106
+ [0, 1, 2]
107
+
108
+ """
109
+ return list(islice(iterable, n))
110
+
111
+
112
+ def tabulate(function, start=0):
113
+ """Return an iterator over the results of ``func(start)``,
114
+ ``func(start + 1)``, ``func(start + 2)``...
115
+
116
+ *func* should be a function that accepts one integer argument.
117
+
118
+ If *start* is not specified it defaults to 0. It will be incremented each
119
+ time the iterator is advanced.
120
+
121
+ >>> square = lambda x: x ** 2
122
+ >>> iterator = tabulate(square, -3)
123
+ >>> take(4, iterator)
124
+ [9, 4, 1, 0]
125
+
126
+ """
127
+ return map(function, count(start))
128
+
129
+
130
+ def tail(n, iterable):
131
+ """Return an iterator over the last *n* items of *iterable*.
132
+
133
+ >>> t = tail(3, 'ABCDEFG')
134
+ >>> list(t)
135
+ ['E', 'F', 'G']
136
+
137
+ """
138
+ # If the given iterable has a length, then we can use islice to get its
139
+ # final elements. Note that if the iterable is not actually Iterable,
140
+ # either islice or deque will throw a TypeError. This is why we don't
141
+ # check if it is Iterable.
142
+ if isinstance(iterable, Sized):
143
+ yield from islice(iterable, max(0, len(iterable) - n), None)
144
+ else:
145
+ yield from iter(deque(iterable, maxlen=n))
146
+
147
+
148
+ def consume(iterator, n=None):
149
+ """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
150
+ entirely.
151
+
152
+ Efficiently exhausts an iterator without returning values. Defaults to
153
+ consuming the whole iterator, but an optional second argument may be
154
+ provided to limit consumption.
155
+
156
+ >>> i = (x for x in range(10))
157
+ >>> next(i)
158
+ 0
159
+ >>> consume(i, 3)
160
+ >>> next(i)
161
+ 4
162
+ >>> consume(i)
163
+ >>> next(i)
164
+ Traceback (most recent call last):
165
+ File "<stdin>", line 1, in <module>
166
+ StopIteration
167
+
168
+ If the iterator has fewer items remaining than the provided limit, the
169
+ whole iterator will be consumed.
170
+
171
+ >>> i = (x for x in range(3))
172
+ >>> consume(i, 5)
173
+ >>> next(i)
174
+ Traceback (most recent call last):
175
+ File "<stdin>", line 1, in <module>
176
+ StopIteration
177
+
178
+ """
179
+ # Use functions that consume iterators at C speed.
180
+ if n is None:
181
+ # feed the entire iterator into a zero-length deque
182
+ deque(iterator, maxlen=0)
183
+ else:
184
+ # advance to the empty slice starting at position n
185
+ next(islice(iterator, n, n), None)
186
+
187
+
188
+ def nth(iterable, n, default=None):
189
+ """Returns the nth item or a default value.
190
+
191
+ >>> l = range(10)
192
+ >>> nth(l, 3)
193
+ 3
194
+ >>> nth(l, 20, "zebra")
195
+ 'zebra'
196
+
197
+ """
198
+ return next(islice(iterable, n, None), default)
199
+
200
+
201
+ def all_equal(iterable):
202
+ """
203
+ Returns ``True`` if all the elements are equal to each other.
204
+
205
+ >>> all_equal('aaaa')
206
+ True
207
+ >>> all_equal('aaab')
208
+ False
209
+
210
+ """
211
+ g = groupby(iterable)
212
+ return next(g, True) and not next(g, False)
213
+
214
+
215
+ def quantify(iterable, pred=bool):
216
+ """Return the how many times the predicate is true.
217
+
218
+ >>> quantify([True, False, True])
219
+ 2
220
+
221
+ """
222
+ return sum(map(pred, iterable))
223
+
224
+
225
+ def pad_none(iterable):
226
+ """Returns the sequence of elements and then returns ``None`` indefinitely.
227
+
228
+ >>> take(5, pad_none(range(3)))
229
+ [0, 1, 2, None, None]
230
+
231
+ Useful for emulating the behavior of the built-in :func:`map` function.
232
+
233
+ See also :func:`padded`.
234
+
235
+ """
236
+ return chain(iterable, repeat(None))
237
+
238
+
239
+ padnone = pad_none
240
+
241
+
242
+ def ncycles(iterable, n):
243
+ """Returns the sequence elements *n* times
244
+
245
+ >>> list(ncycles(["a", "b"], 3))
246
+ ['a', 'b', 'a', 'b', 'a', 'b']
247
+
248
+ """
249
+ return chain.from_iterable(repeat(tuple(iterable), n))
250
+
251
+
252
+ def dotproduct(vec1, vec2):
253
+ """Returns the dot product of the two iterables.
254
+
255
+ >>> dotproduct([10, 10], [20, 20])
256
+ 400
257
+
258
+ """
259
+ return sum(map(operator.mul, vec1, vec2))
260
+
261
+
262
+ def flatten(listOfLists):
263
+ """Return an iterator flattening one level of nesting in a list of lists.
264
+
265
+ >>> list(flatten([[0, 1], [2, 3]]))
266
+ [0, 1, 2, 3]
267
+
268
+ See also :func:`collapse`, which can flatten multiple levels of nesting.
269
+
270
+ """
271
+ return chain.from_iterable(listOfLists)
272
+
273
+
274
+ def repeatfunc(func, times=None, *args):
275
+ """Call *func* with *args* repeatedly, returning an iterable over the
276
+ results.
277
+
278
+ If *times* is specified, the iterable will terminate after that many
279
+ repetitions:
280
+
281
+ >>> from operator import add
282
+ >>> times = 4
283
+ >>> args = 3, 5
284
+ >>> list(repeatfunc(add, times, *args))
285
+ [8, 8, 8, 8]
286
+
287
+ If *times* is ``None`` the iterable will not terminate:
288
+
289
+ >>> from random import randrange
290
+ >>> times = None
291
+ >>> args = 1, 11
292
+ >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
293
+ [2, 4, 8, 1, 8, 4]
294
+
295
+ """
296
+ if times is None:
297
+ return starmap(func, repeat(args))
298
+ return starmap(func, repeat(args, times))
299
+
300
+
301
+ def _pairwise(iterable):
302
+ """Returns an iterator of paired items, overlapping, from the original
303
+
304
+ >>> take(4, pairwise(count()))
305
+ [(0, 1), (1, 2), (2, 3), (3, 4)]
306
+
307
+ On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
308
+
309
+ """
310
+ a, b = tee(iterable)
311
+ next(b, None)
312
+ return zip(a, b)
313
+
314
+
315
+ try:
316
+ from itertools import pairwise as itertools_pairwise
317
+ except ImportError:
318
+ pairwise = _pairwise
319
+ else:
320
+
321
+ def pairwise(iterable):
322
+ return itertools_pairwise(iterable)
323
+
324
+ pairwise.__doc__ = _pairwise.__doc__
325
+
326
+
327
+ class UnequalIterablesError(ValueError):
328
+ def __init__(self, details=None):
329
+ msg = 'Iterables have different lengths'
330
+ if details is not None:
331
+ msg += (': index 0 has length {}; index {} has length {}').format(
332
+ *details
333
+ )
334
+
335
+ super().__init__(msg)
336
+
337
+
338
+ def _zip_equal_generator(iterables):
339
+ for combo in zip_longest(*iterables, fillvalue=_marker):
340
+ for val in combo:
341
+ if val is _marker:
342
+ raise UnequalIterablesError()
343
+ yield combo
344
+
345
+
346
+ def _zip_equal(*iterables):
347
+ # Check whether the iterables are all the same size.
348
+ try:
349
+ first_size = len(iterables[0])
350
+ for i, it in enumerate(iterables[1:], 1):
351
+ size = len(it)
352
+ if size != first_size:
353
+ raise UnequalIterablesError(details=(first_size, i, size))
354
+ # All sizes are equal, we can use the built-in zip.
355
+ return zip(*iterables)
356
+ # If any one of the iterables didn't have a length, start reading
357
+ # them until one runs out.
358
+ except TypeError:
359
+ return _zip_equal_generator(iterables)
360
+
361
+
362
+ def grouper(iterable, n, incomplete='fill', fillvalue=None):
363
+ """Group elements from *iterable* into fixed-length groups of length *n*.
364
+
365
+ >>> list(grouper('ABCDEF', 3))
366
+ [('A', 'B', 'C'), ('D', 'E', 'F')]
367
+
368
+ The keyword arguments *incomplete* and *fillvalue* control what happens for
369
+ iterables whose length is not a multiple of *n*.
370
+
371
+ When *incomplete* is `'fill'`, the last group will contain instances of
372
+ *fillvalue*.
373
+
374
+ >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
375
+ [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
376
+
377
+ When *incomplete* is `'ignore'`, the last group will not be emitted.
378
+
379
+ >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
380
+ [('A', 'B', 'C'), ('D', 'E', 'F')]
381
+
382
+ When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
383
+
384
+ >>> it = grouper('ABCDEFG', 3, incomplete='strict')
385
+ >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
386
+ Traceback (most recent call last):
387
+ ...
388
+ UnequalIterablesError
389
+
390
+ """
391
+ args = [iter(iterable)] * n
392
+ if incomplete == 'fill':
393
+ return zip_longest(*args, fillvalue=fillvalue)
394
+ if incomplete == 'strict':
395
+ return _zip_equal(*args)
396
+ if incomplete == 'ignore':
397
+ return zip(*args)
398
+ else:
399
+ raise ValueError('Expected fill, strict, or ignore')
400
+
401
+
402
+ def roundrobin(*iterables):
403
+ """Yields an item from each iterable, alternating between them.
404
+
405
+ >>> list(roundrobin('ABC', 'D', 'EF'))
406
+ ['A', 'D', 'E', 'B', 'F', 'C']
407
+
408
+ This function produces the same output as :func:`interleave_longest`, but
409
+ may perform better for some inputs (in particular when the number of
410
+ iterables is small).
411
+
412
+ """
413
+ # Recipe credited to George Sakkis
414
+ pending = len(iterables)
415
+ nexts = cycle(iter(it).__next__ for it in iterables)
416
+ while pending:
417
+ try:
418
+ for next in nexts:
419
+ yield next()
420
+ except StopIteration:
421
+ pending -= 1
422
+ nexts = cycle(islice(nexts, pending))
423
+
424
+
425
+ def partition(pred, iterable):
426
+ """
427
+ Returns a 2-tuple of iterables derived from the input iterable.
428
+ The first yields the items that have ``pred(item) == False``.
429
+ The second yields the items that have ``pred(item) == True``.
430
+
431
+ >>> is_odd = lambda x: x % 2 != 0
432
+ >>> iterable = range(10)
433
+ >>> even_items, odd_items = partition(is_odd, iterable)
434
+ >>> list(even_items), list(odd_items)
435
+ ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
436
+
437
+ If *pred* is None, :func:`bool` is used.
438
+
439
+ >>> iterable = [0, 1, False, True, '', ' ']
440
+ >>> false_items, true_items = partition(None, iterable)
441
+ >>> list(false_items), list(true_items)
442
+ ([0, False, ''], [1, True, ' '])
443
+
444
+ """
445
+ if pred is None:
446
+ pred = bool
447
+
448
+ t1, t2, p = tee(iterable, 3)
449
+ p1, p2 = tee(map(pred, p))
450
+ return (compress(t1, map(operator.not_, p1)), compress(t2, p2))
451
+
452
+
453
+ def powerset(iterable):
454
+ """Yields all possible subsets of the iterable.
455
+
456
+ >>> list(powerset([1, 2, 3]))
457
+ [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
458
+
459
+ :func:`powerset` will operate on iterables that aren't :class:`set`
460
+ instances, so repeated elements in the input will produce repeated elements
461
+ in the output. Use :func:`unique_everseen` on the input to avoid generating
462
+ duplicates:
463
+
464
+ >>> seq = [1, 1, 0]
465
+ >>> list(powerset(seq))
466
+ [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
467
+ >>> from more_itertools import unique_everseen
468
+ >>> list(powerset(unique_everseen(seq)))
469
+ [(), (1,), (0,), (1, 0)]
470
+
471
+ """
472
+ s = list(iterable)
473
+ return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
474
+
475
+
476
+ def unique_everseen(iterable, key=None):
477
+ """
478
+ Yield unique elements, preserving order.
479
+
480
+ >>> list(unique_everseen('AAAABBBCCDAABBB'))
481
+ ['A', 'B', 'C', 'D']
482
+ >>> list(unique_everseen('ABBCcAD', str.lower))
483
+ ['A', 'B', 'C', 'D']
484
+
485
+ Sequences with a mix of hashable and unhashable items can be used.
486
+ The function will be slower (i.e., `O(n^2)`) for unhashable items.
487
+
488
+ Remember that ``list`` objects are unhashable - you can use the *key*
489
+ parameter to transform the list to a tuple (which is hashable) to
490
+ avoid a slowdown.
491
+
492
+ >>> iterable = ([1, 2], [2, 3], [1, 2])
493
+ >>> list(unique_everseen(iterable)) # Slow
494
+ [[1, 2], [2, 3]]
495
+ >>> list(unique_everseen(iterable, key=tuple)) # Faster
496
+ [[1, 2], [2, 3]]
497
+
498
+ Similarly, you may want to convert unhashable ``set`` objects with
499
+ ``key=frozenset``. For ``dict`` objects,
500
+ ``key=lambda x: frozenset(x.items())`` can be used.
501
+
502
+ """
503
+ seenset = set()
504
+ seenset_add = seenset.add
505
+ seenlist = []
506
+ seenlist_add = seenlist.append
507
+ use_key = key is not None
508
+
509
+ for element in iterable:
510
+ k = key(element) if use_key else element
511
+ try:
512
+ if k not in seenset:
513
+ seenset_add(k)
514
+ yield element
515
+ except TypeError:
516
+ if k not in seenlist:
517
+ seenlist_add(k)
518
+ yield element
519
+
520
+
521
+ def unique_justseen(iterable, key=None):
522
+ """Yields elements in order, ignoring serial duplicates
523
+
524
+ >>> list(unique_justseen('AAAABBBCCDAABBB'))
525
+ ['A', 'B', 'C', 'D', 'A', 'B']
526
+ >>> list(unique_justseen('ABBCcAD', str.lower))
527
+ ['A', 'B', 'C', 'A', 'D']
528
+
529
+ """
530
+ if key is None:
531
+ return map(operator.itemgetter(0), groupby(iterable))
532
+
533
+ return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
534
+
535
+
536
+ def iter_except(func, exception, first=None):
537
+ """Yields results from a function repeatedly until an exception is raised.
538
+
539
+ Converts a call-until-exception interface to an iterator interface.
540
+ Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
541
+ to end the loop.
542
+
543
+ >>> l = [0, 1, 2]
544
+ >>> list(iter_except(l.pop, IndexError))
545
+ [2, 1, 0]
546
+
547
+ Multiple exceptions can be specified as a stopping condition:
548
+
549
+ >>> l = [1, 2, 3, '...', 4, 5, 6]
550
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
551
+ [7, 6, 5]
552
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
553
+ [4, 3, 2]
554
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
555
+ []
556
+
557
+ """
558
+ try:
559
+ if first is not None:
560
+ yield first()
561
+ while 1:
562
+ yield func()
563
+ except exception:
564
+ pass
565
+
566
+
567
+ def first_true(iterable, default=None, pred=None):
568
+ """
569
+ Returns the first true value in the iterable.
570
+
571
+ If no true value is found, returns *default*
572
+
573
+ If *pred* is not None, returns the first item for which
574
+ ``pred(item) == True`` .
575
+
576
+ >>> first_true(range(10))
577
+ 1
578
+ >>> first_true(range(10), pred=lambda x: x > 5)
579
+ 6
580
+ >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
581
+ 'missing'
582
+
583
+ """
584
+ return next(filter(pred, iterable), default)
585
+
586
+
587
+ def random_product(*args, repeat=1):
588
+ """Draw an item at random from each of the input iterables.
589
+
590
+ >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
591
+ ('c', 3, 'Z')
592
+
593
+ If *repeat* is provided as a keyword argument, that many items will be
594
+ drawn from each iterable.
595
+
596
+ >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
597
+ ('a', 2, 'd', 3)
598
+
599
+ This is equivalent to taking a random selection from
600
+ ``itertools.product(*args, **kwarg)``.
601
+
602
+ """
603
+ pools = [tuple(pool) for pool in args] * repeat
604
+ return tuple(choice(pool) for pool in pools)
605
+
606
+
607
+ def random_permutation(iterable, r=None):
608
+ """Return a random *r* length permutation of the elements in *iterable*.
609
+
610
+ If *r* is not specified or is ``None``, then *r* defaults to the length of
611
+ *iterable*.
612
+
613
+ >>> random_permutation(range(5)) # doctest:+SKIP
614
+ (3, 4, 0, 1, 2)
615
+
616
+ This is equivalent to taking a random selection from
617
+ ``itertools.permutations(iterable, r)``.
618
+
619
+ """
620
+ pool = tuple(iterable)
621
+ r = len(pool) if r is None else r
622
+ return tuple(sample(pool, r))
623
+
624
+
625
+ def random_combination(iterable, r):
626
+ """Return a random *r* length subsequence of the elements in *iterable*.
627
+
628
+ >>> random_combination(range(5), 3) # doctest:+SKIP
629
+ (2, 3, 4)
630
+
631
+ This is equivalent to taking a random selection from
632
+ ``itertools.combinations(iterable, r)``.
633
+
634
+ """
635
+ pool = tuple(iterable)
636
+ n = len(pool)
637
+ indices = sorted(sample(range(n), r))
638
+ return tuple(pool[i] for i in indices)
639
+
640
+
641
+ def random_combination_with_replacement(iterable, r):
642
+ """Return a random *r* length subsequence of elements in *iterable*,
643
+ allowing individual elements to be repeated.
644
+
645
+ >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
646
+ (0, 0, 1, 2, 2)
647
+
648
+ This is equivalent to taking a random selection from
649
+ ``itertools.combinations_with_replacement(iterable, r)``.
650
+
651
+ """
652
+ pool = tuple(iterable)
653
+ n = len(pool)
654
+ indices = sorted(randrange(n) for i in range(r))
655
+ return tuple(pool[i] for i in indices)
656
+
657
+
658
+ def nth_combination(iterable, r, index):
659
+ """Equivalent to ``list(combinations(iterable, r))[index]``.
660
+
661
+ The subsequences of *iterable* that are of length *r* can be ordered
662
+ lexicographically. :func:`nth_combination` computes the subsequence at
663
+ sort position *index* directly, without computing the previous
664
+ subsequences.
665
+
666
+ >>> nth_combination(range(5), 3, 5)
667
+ (0, 3, 4)
668
+
669
+ ``ValueError`` will be raised if *r* is negative or greater than the length
670
+ of *iterable*.
671
+ ``IndexError`` will be raised if the given *index* is invalid.
672
+ """
673
+ pool = tuple(iterable)
674
+ n = len(pool)
675
+ if (r < 0) or (r > n):
676
+ raise ValueError
677
+
678
+ c = 1
679
+ k = min(r, n - r)
680
+ for i in range(1, k + 1):
681
+ c = c * (n - k + i) // i
682
+
683
+ if index < 0:
684
+ index += c
685
+
686
+ if (index < 0) or (index >= c):
687
+ raise IndexError
688
+
689
+ result = []
690
+ while r:
691
+ c, n, r = c * r // n, n - 1, r - 1
692
+ while index >= c:
693
+ index -= c
694
+ c, n = c * (n - r) // n, n - 1
695
+ result.append(pool[-1 - n])
696
+
697
+ return tuple(result)
698
+
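As a sanity check of the direct-indexing arithmetic above, here is a small sketch comparing `nth_combination` against materializing `itertools.combinations` (assuming `more_itertools` is installed):

from itertools import combinations
from more_itertools import nth_combination

pool, r = range(5), 3
expected = list(combinations(pool, r))
# Every index maps to the combination at the same lexicographic position,
# without enumerating the earlier combinations.
assert all(nth_combination(pool, r, i) == expected[i] for i in range(len(expected)))
print(nth_combination(pool, r, 5))       # (0, 3, 4)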
699
+
700
+ def prepend(value, iterator):
701
+ """Yield *value*, followed by the elements in *iterator*.
702
+
703
+ >>> value = '0'
704
+ >>> iterator = ['1', '2', '3']
705
+ >>> list(prepend(value, iterator))
706
+ ['0', '1', '2', '3']
707
+
708
+ To prepend multiple values, see :func:`itertools.chain`
709
+ or :func:`value_chain`.
710
+
711
+ """
712
+ return chain([value], iterator)
713
+
714
+
715
+ def convolve(signal, kernel):
716
+ """Convolve the iterable *signal* with the iterable *kernel*.
717
+
718
+ >>> signal = (1, 2, 3, 4, 5)
719
+ >>> kernel = [3, 2, 1]
720
+ >>> list(convolve(signal, kernel))
721
+ [3, 8, 14, 20, 26, 14, 5]
722
+
723
+ Note: the input arguments are not interchangeable, as the *kernel*
724
+ is immediately consumed and stored.
725
+
726
+ """
727
+ # This implementation intentionally doesn't match the one in the itertools
728
+ # documentation.
729
+ kernel = tuple(kernel)[::-1]
730
+ n = len(kernel)
731
+ window = deque([0], maxlen=n) * n
732
+ for x in chain(signal, repeat(0, n - 1)):
733
+ window.append(x)
734
+ yield _sumprod(kernel, window)
735
+
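A short sketch of a common use of `convolve`, a moving average (the kernel values are illustrative):

from more_itertools import convolve

data = [10, 20, 30, 40, 50]
kernel = [0.25, 0.25, 0.25, 0.25]        # 4-point averaging kernel
# Full convolution zero-pads both ends, so the output has
# len(data) + len(kernel) - 1 values; the interior values are the
# averages of each full 4-item window.
print(list(convolve(data, kernel)))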
736
+
737
+ def before_and_after(predicate, it):
738
+ """A variant of :func:`takewhile` that allows complete access to the
739
+ remainder of the iterator.
740
+
741
+ >>> it = iter('ABCdEfGhI')
742
+ >>> all_upper, remainder = before_and_after(str.isupper, it)
743
+ >>> ''.join(all_upper)
744
+ 'ABC'
745
+ >>> ''.join(remainder) # takewhile() would lose the 'd'
746
+ 'dEfGhI'
747
+
748
+ Note that the first iterator must be fully consumed before the second
749
+ iterator can generate valid results.
750
+ """
751
+ it = iter(it)
752
+ transition = []
753
+
754
+ def true_iterator():
755
+ for elem in it:
756
+ if predicate(elem):
757
+ yield elem
758
+ else:
759
+ transition.append(elem)
760
+ return
761
+
762
+ # Note: this is different from itertools recipes to allow nesting
763
+ # before_and_after remainders into before_and_after again. See tests
764
+ # for an example.
765
+ remainder_iterator = chain(transition, it)
766
+
767
+ return true_iterator(), remainder_iterator
768
+
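The comment above mentions that the remainder can be fed back into `before_and_after`; a small sketch of that nesting (remembering to exhaust each first iterator before touching its remainder):

from more_itertools import before_and_after

it = iter('ABCdefGHI')
upper, rest = before_and_after(str.isupper, it)
print(''.join(upper))            # 'ABC'
# The remainder chains the transition element back in front of the
# original iterator, so it can be split again.
lower, rest = before_and_after(str.islower, rest)
print(''.join(lower))            # 'def'
print(''.join(rest))             # 'GHI'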
769
+
770
+ def triplewise(iterable):
771
+ """Return overlapping triplets from *iterable*.
772
+
773
+ >>> list(triplewise('ABCDE'))
774
+ [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
775
+
776
+ """
777
+ for (a, _), (b, c) in pairwise(pairwise(iterable)):
778
+ yield a, b, c
779
+
780
+
781
+ def sliding_window(iterable, n):
782
+ """Return a sliding window of width *n* over *iterable*.
783
+
784
+ >>> list(sliding_window(range(6), 4))
785
+ [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
786
+
787
+ If *iterable* has fewer than *n* items, then nothing is yielded:
788
+
789
+ >>> list(sliding_window(range(3), 4))
790
+ []
791
+
792
+ For a variant with more features, see :func:`windowed`.
793
+ """
794
+ it = iter(iterable)
795
+ window = deque(islice(it, n - 1), maxlen=n)
796
+ for x in it:
797
+ window.append(x)
798
+ yield tuple(window)
799
+
800
+
801
+ def subslices(iterable):
802
+ """Return all contiguous non-empty subslices of *iterable*.
803
+
804
+ >>> list(subslices('ABC'))
805
+ [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]
806
+
807
+ This is similar to :func:`substrings`, but emits items in a different
808
+ order.
809
+ """
810
+ seq = list(iterable)
811
+ slices = starmap(slice, combinations(range(len(seq) + 1), 2))
812
+ return map(operator.getitem, repeat(seq), slices)
813
+
814
+
815
+ def polynomial_from_roots(roots):
816
+ """Compute a polynomial's coefficients from its roots.
817
+
818
+ >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
819
+ >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
820
+ [1, -4, -17, 60]
821
+ """
822
+ factors = zip(repeat(1), map(operator.neg, roots))
823
+ return list(reduce(convolve, factors, [1]))
824
+
825
+
826
+ def iter_index(iterable, value, start=0, stop=None):
827
+ """Yield the index of each place in *iterable* that *value* occurs,
828
+ beginning with index *start* and ending before index *stop*.
829
+
830
+ See :func:`locate` for a more general means of finding the indexes
831
+ associated with particular values.
832
+
833
+ >>> list(iter_index('AABCADEAF', 'A'))
834
+ [0, 1, 4, 7]
835
+ >>> list(iter_index('AABCADEAF', 'A', 1)) # start index is inclusive
836
+ [1, 4, 7]
837
+ >>> list(iter_index('AABCADEAF', 'A', 1, 7)) # stop index is not inclusive
838
+ [1, 4]
839
+ """
840
+ seq_index = getattr(iterable, 'index', None)
841
+ if seq_index is None:
842
+ # Slow path for general iterables
843
+ it = islice(iterable, start, stop)
844
+ for i, element in enumerate(it, start):
845
+ if element is value or element == value:
846
+ yield i
847
+ else:
848
+ # Fast path for sequences
849
+ stop = len(iterable) if stop is None else stop
850
+ i = start - 1
851
+ try:
852
+ while True:
853
+ yield (i := seq_index(value, i + 1, stop))
854
+ except ValueError:
855
+ pass
856
+
857
+
858
+ def sieve(n):
859
+ """Yield the primes less than n.
860
+
861
+ >>> list(sieve(30))
862
+ [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
863
+ """
864
+ if n > 2:
865
+ yield 2
866
+ start = 3
867
+ data = bytearray((0, 1)) * (n // 2)
868
+ limit = math.isqrt(n) + 1
869
+ for p in iter_index(data, 1, start, limit):
870
+ yield from iter_index(data, 1, start, p * p)
871
+ data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
872
+ start = p * p
873
+ yield from iter_index(data, 1, start)
874
+
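A quick cross-check of the sieve against naive trial division (the `_is_prime` helper below is only for the comparison, not part of the module):

from more_itertools import sieve

def _is_prime(n):
    # Naive trial division; used only to verify the sieve output.
    return n > 1 and all(n % d for d in range(2, int(n ** 0.5) + 1))

limit = 100
assert list(sieve(limit)) == [n for n in range(limit) if _is_prime(n)]
print(list(sieve(30)))           # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]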
875
+
876
+ def _batched(iterable, n, *, strict=False):
877
+ """Batch data into tuples of length *n*. If the number of items in
878
+ *iterable* is not divisible by *n*:
879
+ * The last batch will be shorter if *strict* is ``False``.
880
+ * :exc:`ValueError` will be raised if *strict* is ``True``.
881
+
882
+ >>> list(batched('ABCDEFG', 3))
883
+ [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]
884
+
885
+ On Python 3.13 and above, this is an alias for :func:`itertools.batched`.
886
+ """
887
+ if n < 1:
888
+ raise ValueError('n must be at least one')
889
+ it = iter(iterable)
890
+ while batch := tuple(islice(it, n)):
891
+ if strict and len(batch) != n:
892
+ raise ValueError('batched(): incomplete batch')
893
+ yield batch
894
+
895
+
896
+ if hexversion >= 0x30D00A2:
897
+ from itertools import batched as itertools_batched
898
+
899
+ def batched(iterable, n, *, strict=False):
900
+ return itertools_batched(iterable, n, strict=strict)
901
+
902
+ else:
903
+ batched = _batched
904
+
905
+ batched.__doc__ = _batched.__doc__
906
+
907
+
908
+ def transpose(it):
909
+ """Swap the rows and columns of the input matrix.
910
+
911
+ >>> list(transpose([(1, 2, 3), (11, 22, 33)]))
912
+ [(1, 11), (2, 22), (3, 33)]
913
+
914
+ The caller should ensure that the dimensions of the input are compatible.
915
+ If the input is empty, no output will be produced.
916
+ """
917
+ return _zip_strict(*it)
918
+
919
+
920
+ def reshape(matrix, cols):
921
+ """Reshape the 2-D input *matrix* to have a column count given by *cols*.
922
+
923
+ >>> matrix = [(0, 1), (2, 3), (4, 5)]
924
+ >>> cols = 3
925
+ >>> list(reshape(matrix, cols))
926
+ [(0, 1, 2), (3, 4, 5)]
927
+ """
928
+ return batched(chain.from_iterable(matrix), cols)
929
+
930
+
931
+ def matmul(m1, m2):
932
+ """Multiply two matrices.
933
+
934
+ >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
935
+ [(49, 80), (41, 60)]
936
+
937
+ The caller should ensure that the dimensions of the input matrices are
938
+ compatible with each other.
939
+ """
940
+ n = len(m2[0])
941
+ return batched(starmap(_sumprod, product(m1, transpose(m2))), n)
942
+
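A brief sketch tying `transpose`, `reshape`, and `matmul` together on a small matrix (values are illustrative):

from more_itertools import matmul, reshape, transpose

m = [(1, 2), (3, 4)]
identity = [(1, 0), (0, 1)]

print(list(matmul(m, identity)))         # [(1, 2), (3, 4)]
print(list(transpose(m)))                # [(1, 3), (2, 4)]
# reshape flattens the rows and re-batches them, so a 2x2 matrix can be
# viewed as a single row of four values.
print(list(reshape(m, 4)))               # [(1, 2, 3, 4)]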
943
+
944
+ def factor(n):
945
+ """Yield the prime factors of n.
946
+
947
+ >>> list(factor(360))
948
+ [2, 2, 2, 3, 3, 5]
949
+ """
950
+ for prime in sieve(math.isqrt(n) + 1):
951
+ while not n % prime:
952
+ yield prime
953
+ n //= prime
954
+ if n == 1:
955
+ return
956
+ if n > 1:
957
+ yield n
958
+
959
+
960
+ def polynomial_eval(coefficients, x):
961
+ """Evaluate a polynomial at a specific value.
962
+
963
+ Example: evaluating x^3 - 4 * x^2 - 17 * x + 60 at x = 2.5:
964
+
965
+ >>> coefficients = [1, -4, -17, 60]
966
+ >>> x = 2.5
967
+ >>> polynomial_eval(coefficients, x)
968
+ 8.125
969
+ """
970
+ n = len(coefficients)
971
+ if n == 0:
972
+ return x * 0 # coerce zero to the type of x
973
+ powers = map(pow, repeat(x), reversed(range(n)))
974
+ return _sumprod(coefficients, powers)
975
+
976
+
977
+ def sum_of_squares(it):
978
+ """Return the sum of the squares of the input values.
979
+
980
+ >>> sum_of_squares([10, 20, 30])
981
+ 1400
982
+ """
983
+ return _sumprod(*tee(it))
984
+
985
+
986
+ def polynomial_derivative(coefficients):
987
+ """Compute the first derivative of a polynomial.
988
+
989
+ Example: evaluating the derivative of x^3 - 4 * x^2 - 17 * x + 60
990
+
991
+ >>> coefficients = [1, -4, -17, 60]
992
+ >>> derivative_coefficients = polynomial_derivative(coefficients)
993
+ >>> derivative_coefficients
994
+ [3, -8, -17]
995
+ """
996
+ n = len(coefficients)
997
+ powers = reversed(range(1, n))
998
+ return list(map(operator.mul, coefficients, powers))
999
+
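The three polynomial helpers compose naturally; a short sketch (assuming this module is importable as `more_itertools`):

from more_itertools import (
    polynomial_derivative,
    polynomial_eval,
    polynomial_from_roots,
)

coefficients = polynomial_from_roots([5, -4, 3])   # x^3 - 4x^2 - 17x + 60
print(coefficients)                                # [1, -4, -17, 60]
print(polynomial_eval(coefficients, 5))            # 0, since 5 is a root
slope = polynomial_eval(polynomial_derivative(coefficients), 2.5)
print(slope)                                       # derivative evaluated at x = 2.5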
1000
+
1001
+ def totient(n):
1002
+ """Return the count of natural numbers up to *n* that are coprime with *n*.
1003
+
1004
+ >>> totient(9)
1005
+ 6
1006
+ >>> totient(12)
1007
+ 4
1008
+ """
1009
+ for p in unique_justseen(factor(n)):
1010
+ n = n // p * (p - 1)
1011
+
1012
+ return n
venv/lib/python3.10/site-packages/more_itertools/recipes.pyi ADDED
@@ -0,0 +1,128 @@
1
+ """Stubs for more_itertools.recipes"""
2
+ from __future__ import annotations
3
+
4
+ from typing import (
5
+ Any,
6
+ Callable,
7
+ Iterable,
8
+ Iterator,
9
+ overload,
10
+ Sequence,
11
+ Type,
12
+ TypeVar,
13
+ )
14
+
15
+ # Type and type variable definitions
16
+ _T = TypeVar('_T')
17
+ _T1 = TypeVar('_T1')
18
+ _T2 = TypeVar('_T2')
19
+ _U = TypeVar('_U')
20
+
21
+ def take(n: int, iterable: Iterable[_T]) -> list[_T]: ...
22
+ def tabulate(
23
+ function: Callable[[int], _T], start: int = ...
24
+ ) -> Iterator[_T]: ...
25
+ def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ...
26
+ def consume(iterator: Iterable[_T], n: int | None = ...) -> None: ...
27
+ @overload
28
+ def nth(iterable: Iterable[_T], n: int) -> _T | None: ...
29
+ @overload
30
+ def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
31
+ def all_equal(iterable: Iterable[_T]) -> bool: ...
32
+ def quantify(
33
+ iterable: Iterable[_T], pred: Callable[[_T], bool] = ...
34
+ ) -> int: ...
35
+ def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
36
+ def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
37
+ def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ...
38
+ def dotproduct(vec1: Iterable[_T1], vec2: Iterable[_T2]) -> Any: ...
39
+ def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ...
40
+ def repeatfunc(
41
+ func: Callable[..., _U], times: int | None = ..., *args: Any
42
+ ) -> Iterator[_U]: ...
43
+ def pairwise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T]]: ...
44
+ def grouper(
45
+ iterable: Iterable[_T],
46
+ n: int,
47
+ incomplete: str = ...,
48
+ fillvalue: _U = ...,
49
+ ) -> Iterator[tuple[_T | _U, ...]]: ...
50
+ def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ...
51
+ def partition(
52
+ pred: Callable[[_T], object] | None, iterable: Iterable[_T]
53
+ ) -> tuple[Iterator[_T], Iterator[_T]]: ...
54
+ def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
55
+ def unique_everseen(
56
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
57
+ ) -> Iterator[_T]: ...
58
+ def unique_justseen(
59
+ iterable: Iterable[_T], key: Callable[[_T], object] | None = ...
60
+ ) -> Iterator[_T]: ...
61
+ @overload
62
+ def iter_except(
63
+ func: Callable[[], _T],
64
+ exception: Type[BaseException] | tuple[Type[BaseException], ...],
65
+ first: None = ...,
66
+ ) -> Iterator[_T]: ...
67
+ @overload
68
+ def iter_except(
69
+ func: Callable[[], _T],
70
+ exception: Type[BaseException] | tuple[Type[BaseException], ...],
71
+ first: Callable[[], _U],
72
+ ) -> Iterator[_T | _U]: ...
73
+ @overload
74
+ def first_true(
75
+ iterable: Iterable[_T], *, pred: Callable[[_T], object] | None = ...
76
+ ) -> _T | None: ...
77
+ @overload
78
+ def first_true(
79
+ iterable: Iterable[_T],
80
+ default: _U,
81
+ pred: Callable[[_T], object] | None = ...,
82
+ ) -> _T | _U: ...
83
+ def random_product(
84
+ *args: Iterable[_T], repeat: int = ...
85
+ ) -> tuple[_T, ...]: ...
86
+ def random_permutation(
87
+ iterable: Iterable[_T], r: int | None = ...
88
+ ) -> tuple[_T, ...]: ...
89
+ def random_combination(iterable: Iterable[_T], r: int) -> tuple[_T, ...]: ...
90
+ def random_combination_with_replacement(
91
+ iterable: Iterable[_T], r: int
92
+ ) -> tuple[_T, ...]: ...
93
+ def nth_combination(
94
+ iterable: Iterable[_T], r: int, index: int
95
+ ) -> tuple[_T, ...]: ...
96
+ def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[_T | _U]: ...
97
+ def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ...
98
+ def before_and_after(
99
+ predicate: Callable[[_T], bool], it: Iterable[_T]
100
+ ) -> tuple[Iterator[_T], Iterator[_T]]: ...
101
+ def triplewise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]: ...
102
+ def sliding_window(
103
+ iterable: Iterable[_T], n: int
104
+ ) -> Iterator[tuple[_T, ...]]: ...
105
+ def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ...
106
+ def polynomial_from_roots(roots: Sequence[_T]) -> list[_T]: ...
107
+ def iter_index(
108
+ iterable: Iterable[_T],
109
+ value: Any,
110
+ start: int | None = ...,
111
+ stop: int | None = ...,
112
+ ) -> Iterator[int]: ...
113
+ def sieve(n: int) -> Iterator[int]: ...
114
+ def batched(
115
+ iterable: Iterable[_T], n: int, *, strict: bool = False
116
+ ) -> Iterator[tuple[_T]]: ...
117
+ def transpose(
118
+ it: Iterable[Iterable[_T]],
119
+ ) -> Iterator[tuple[_T, ...]]: ...
120
+ def reshape(
121
+ matrix: Iterable[Iterable[_T]], cols: int
122
+ ) -> Iterator[tuple[_T, ...]]: ...
123
+ def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[tuple[_T]]: ...
124
+ def factor(n: int) -> Iterator[int]: ...
125
+ def polynomial_eval(coefficients: Sequence[_T], x: _U) -> _U: ...
126
+ def sum_of_squares(it: Iterable[_T]) -> _T: ...
127
+ def polynomial_derivative(coefficients: Sequence[_T]) -> list[_T]: ...
128
+ def totient(n: int) -> int: ...
venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
1
+ End User License Agreement
2
+ --------------------------
3
+
4
+
5
+ Preface
6
+ -------
7
+
8
+ The Software License Agreement in Chapter 1 and the Supplement
9
+ in Chapter 2 contain license terms and conditions that govern
10
+ the use of NVIDIA software. By accepting this agreement, you
11
+ agree to comply with all the terms and conditions applicable
12
+ to the product(s) included herein.
13
+
14
+
15
+ NVIDIA Driver
16
+
17
+
18
+ Description
19
+
20
+ This package contains the operating system driver and
21
+ fundamental system software components for NVIDIA GPUs.
22
+
23
+
24
+ NVIDIA CUDA Toolkit
25
+
26
+
27
+ Description
28
+
29
+ The NVIDIA CUDA Toolkit provides command-line and graphical
30
+ tools for building, debugging and optimizing the performance
31
+ of applications accelerated by NVIDIA GPUs, runtime and math
32
+ libraries, and documentation including programming guides,
33
+ user manuals, and API references.
34
+
35
+
36
+ Default Install Location of CUDA Toolkit
37
+
38
+ Windows platform:
39
+
40
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
41
+
42
+ Linux platform:
43
+
44
+ /usr/local/cuda-#.#
45
+
46
+ Mac platform:
47
+
48
+ /Developer/NVIDIA/CUDA-#.#
49
+
50
+
51
+ NVIDIA CUDA Samples
52
+
53
+
54
+ Description
55
+
56
+ This package includes over 100+ CUDA examples that demonstrate
57
+ various CUDA programming principles, and efficient CUDA
58
+ implementation of algorithms in specific application domains.
59
+
60
+
61
+ Default Install Location of CUDA Samples
62
+
63
+ Windows platform:
64
+
65
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
66
+
67
+ Linux platform:
68
+
69
+ /usr/local/cuda-#.#/samples
70
+
71
+ and
72
+
73
+ $HOME/NVIDIA_CUDA-#.#_Samples
74
+
75
+ Mac platform:
76
+
77
+ /Developer/NVIDIA/CUDA-#.#/samples
78
+
79
+
80
+ NVIDIA Nsight Visual Studio Edition (Windows only)
81
+
82
+
83
+ Description
84
+
85
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
86
+ development environment integrated into Microsoft Visual
87
+ Studio that provides tools for debugging, profiling, analyzing
88
+ and optimizing your GPU computing and graphics applications.
89
+
90
+
91
+ Default Install Location of Nsight Visual Studio Edition
92
+
93
+ Windows platform:
94
+
95
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
96
+
97
+
98
+ 1. License Agreement for NVIDIA Software Development Kits
99
+ ---------------------------------------------------------
100
+
101
+
102
+ Release Date: July 26, 2018
103
+ ---------------------------
104
+
105
+
106
+ Important Notice: Read before downloading, installing,
107
+ copying or using the licensed software:
108
+ -------------------------------------------------------
109
+
110
+ This license agreement, including exhibits attached
111
+ ("Agreement”) is a legal agreement between you and NVIDIA
112
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
113
+ software development kit (“SDK”).
114
+
115
+ Each SDK has its own set of software and materials, but here
116
+ is a description of the types of items that may be included in
117
+ a SDK: source code, header files, APIs, data sets and assets
118
+ (examples include images, textures, models, scenes, videos,
119
+ native API input/output files), binary software, sample code,
120
+ libraries, utility programs, programming code and
121
+ documentation.
122
+
123
+ This Agreement can be accepted only by an adult of legal age
124
+ of majority in the country in which the SDK is used.
125
+
126
+ If you are entering into this Agreement on behalf of a company
127
+ or other legal entity, you represent that you have the legal
128
+ authority to bind the entity to this Agreement, in which case
129
+ “you” will mean the entity you represent.
130
+
131
+ If you don’t have the required age or authority to accept
132
+ this Agreement, or if you don’t accept all the terms and
133
+ conditions of this Agreement, do not download, install or use
134
+ the SDK.
135
+
136
+ You agree to use the SDK only for purposes that are permitted
137
+ by (a) this Agreement, and (b) any applicable law, regulation
138
+ or generally accepted practices or guidelines in the relevant
139
+ jurisdictions.
140
+
141
+
142
+ 1.1. License
143
+
144
+
145
+ 1.1.1. License Grant
146
+
147
+ Subject to the terms of this Agreement, NVIDIA hereby grants
148
+ you a non-exclusive, non-transferable license, without the
149
+ right to sublicense (except as expressly provided in this
150
+ Agreement) to:
151
+
152
+ 1. Install and use the SDK,
153
+
154
+ 2. Modify and create derivative works of sample source code
155
+ delivered in the SDK, and
156
+
157
+ 3. Distribute those portions of the SDK that are identified
158
+ in this Agreement as distributable, as incorporated in
159
+ object code format into a software application that meets
160
+ the distribution requirements indicated in this Agreement.
161
+
162
+
163
+ 1.1.2. Distribution Requirements
164
+
165
+ These are the distribution requirements for you to exercise
166
+ the distribution grant:
167
+
168
+ 1. Your application must have material additional
169
+ functionality, beyond the included portions of the SDK.
170
+
171
+ 2. The distributable portions of the SDK shall only be
172
+ accessed by your application.
173
+
174
+ 3. The following notice shall be included in modifications
175
+ and derivative works of sample source code distributed:
176
+ “This software contains source code provided by NVIDIA
177
+ Corporation.”
178
+
179
+ 4. Unless a developer tool is identified in this Agreement
180
+ as distributable, it is delivered for your internal use
181
+ only.
182
+
183
+ 5. The terms under which you distribute your application
184
+ must be consistent with the terms of this Agreement,
185
+ including (without limitation) terms relating to the
186
+ license grant and license restrictions and protection of
187
+ NVIDIA’s intellectual property rights. Additionally, you
188
+ agree that you will protect the privacy, security and
189
+ legal rights of your application users.
190
+
191
+ 6. You agree to notify NVIDIA in writing of any known or
192
+ suspected distribution or use of the SDK not in compliance
193
+ with the requirements of this Agreement, and to enforce
194
+ the terms of your agreements with respect to distributed
195
+ SDK.
196
+
197
+
198
+ 1.1.3. Authorized Users
199
+
200
+ You may allow employees and contractors of your entity or of
201
+ your subsidiary(ies) to access and use the SDK from your
202
+ secure network to perform work on your behalf.
203
+
204
+ If you are an academic institution you may allow users
205
+ enrolled or employed by the academic institution to access and
206
+ use the SDK from your secure network.
207
+
208
+ You are responsible for the compliance with the terms of this
209
+ Agreement by your authorized users. If you become aware that
210
+ your authorized users didn’t follow the terms of this
211
+ Agreement, you agree to take reasonable steps to resolve the
212
+ non-compliance and prevent new occurrences.
213
+
214
+
215
+ 1.1.4. Pre-Release SDK
216
+
217
+ The SDK versions identified as alpha, beta, preview or
218
+ otherwise as pre-release, may not be fully functional, may
219
+ contain errors or design flaws, and may have reduced or
220
+ different security, privacy, accessibility, availability, and
221
+ reliability standards relative to commercial versions of
222
+ NVIDIA software and materials. Use of a pre-release SDK may
223
+ result in unexpected results, loss of data, project delays or
224
+ other unpredictable damage or loss.
225
+
226
+ You may use a pre-release SDK at your own risk, understanding
227
+ that pre-release SDKs are not intended for use in production
228
+ or business-critical systems.
229
+
230
+ NVIDIA may choose not to make available a commercial version
231
+ of any pre-release SDK. NVIDIA may also choose to abandon
232
+ development and terminate the availability of a pre-release
233
+ SDK at any time without liability.
234
+
235
+
236
+ 1.1.5. Updates
237
+
238
+ NVIDIA may, at its option, make available patches, workarounds
239
+ or other updates to this SDK. Unless the updates are provided
240
+ with their separate governing terms, they are deemed part of
241
+ the SDK licensed to you as provided in this Agreement. You
242
+ agree that the form and content of the SDK that NVIDIA
243
+ provides may change without prior notice to you. While NVIDIA
244
+ generally maintains compatibility between versions, NVIDIA may
245
+ in some cases make changes that introduce incompatibilities in
246
+ future versions of the SDK.
247
+
248
+
249
+ 1.1.6. Third Party Licenses
250
+
251
+ The SDK may come bundled with, or otherwise include or be
252
+ distributed with, third party software licensed by a NVIDIA
253
+ supplier and/or open source software provided under an open
254
+ source license. Use of third party software is subject to the
255
+ third-party license terms, or in the absence of third party
256
+ terms, the terms of this Agreement. Copyright to third party
257
+ software is held by the copyright holders indicated in the
258
+ third-party software or license.
259
+
260
+
261
+ 1.1.7. Reservation of Rights
262
+
263
+ NVIDIA reserves all rights, title, and interest in and to the
264
+ SDK, not expressly granted to you under this Agreement.
265
+
266
+
267
+ 1.2. Limitations
268
+
269
+ The following license limitations apply to your use of the
270
+ SDK:
271
+
272
+ 1. You may not reverse engineer, decompile or disassemble,
273
+ or remove copyright or other proprietary notices from any
274
+ portion of the SDK or copies of the SDK.
275
+
276
+ 2. Except as expressly provided in this Agreement, you may
277
+ not copy, sell, rent, sublicense, transfer, distribute,
278
+ modify, or create derivative works of any portion of the
279
+ SDK. For clarity, you may not distribute or sublicense the
280
+ SDK as a stand-alone product.
281
+
282
+ 3. Unless you have an agreement with NVIDIA for this
283
+ purpose, you may not indicate that an application created
284
+ with the SDK is sponsored or endorsed by NVIDIA.
285
+
286
+ 4. You may not bypass, disable, or circumvent any
287
+ encryption, security, digital rights management or
288
+ authentication mechanism in the SDK.
289
+
290
+ 5. You may not use the SDK in any manner that would cause it
291
+ to become subject to an open source software license. As
292
+ examples, licenses that require as a condition of use,
293
+ modification, and/or distribution that the SDK be:
294
+
295
+ a. Disclosed or distributed in source code form;
296
+
297
+ b. Licensed for the purpose of making derivative works;
298
+ or
299
+
300
+ c. Redistributable at no charge.
301
+
302
+ 6. Unless you have an agreement with NVIDIA for this
303
+ purpose, you may not use the SDK with any system or
304
+ application where the use or failure of the system or
305
+ application can reasonably be expected to threaten or
306
+ result in personal injury, death, or catastrophic loss.
307
+ Examples include use in avionics, navigation, military,
308
+ medical, life support or other life critical applications.
309
+ NVIDIA does not design, test or manufacture the SDK for
310
+ these critical uses and NVIDIA shall not be liable to you
311
+ or any third party, in whole or in part, for any claims or
312
+ damages arising from such uses.
313
+
314
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
315
+ and its affiliates, and their respective employees,
316
+ contractors, agents, officers and directors, from and
317
+ against any and all claims, damages, obligations, losses,
318
+ liabilities, costs or debt, fines, restitutions and
319
+ expenses (including but not limited to attorney’s fees
320
+ and costs incident to establishing the right of
321
+ indemnification) arising out of or related to your use of
322
+ the SDK outside of the scope of this Agreement, or not in
323
+ compliance with its terms.
324
+
325
+
326
+ 1.3. Ownership
327
+
328
+ 1. NVIDIA or its licensors hold all rights, title and
329
+ interest in and to the SDK and its modifications and
330
+ derivative works, including their respective intellectual
331
+ property rights, subject to your rights described in this
332
+ section. This SDK may include software and materials from
333
+ NVIDIA’s licensors, and these licensors are intended
334
+ third party beneficiaries that may enforce this Agreement
335
+ with respect to their intellectual property rights.
336
+
337
+ 2. You hold all rights, title and interest in and to your
338
+ applications and your derivative works of the sample
339
+ source code delivered in the SDK, including their
340
+ respective intellectual property rights, subject to
341
+ NVIDIA’s rights described in this section.
342
+
343
+ 3. You may, but don’t have to, provide to NVIDIA
344
+ suggestions, feature requests or other feedback regarding
345
+ the SDK, including possible enhancements or modifications
346
+ to the SDK. For any feedback that you voluntarily provide,
347
+ you hereby grant NVIDIA and its affiliates a perpetual,
348
+ non-exclusive, worldwide, irrevocable license to use,
349
+ reproduce, modify, license, sublicense (through multiple
350
+ tiers of sublicensees), and distribute (through multiple
351
+ tiers of distributors) it without the payment of any
352
+ royalties or fees to you. NVIDIA will use feedback at its
353
+ choice. NVIDIA is constantly looking for ways to improve
354
+ its products, so you may send feedback to NVIDIA through
355
+ the developer portal at https://developer.nvidia.com.
356
+
357
+
358
+ 1.4. No Warranties
359
+
360
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
361
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
362
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
363
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
364
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
365
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
366
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
367
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
368
+ DEALING OR COURSE OF TRADE.
369
+
370
+
371
+ 1.5. Limitation of Liability
372
+
373
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
374
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
375
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
376
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
377
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
378
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
379
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
380
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
381
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
382
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
383
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
384
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
385
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
386
+ LIMIT.
387
+
388
+ These exclusions and limitations of liability shall apply
389
+ regardless if NVIDIA or its affiliates have been advised of
390
+ the possibility of such damages, and regardless of whether a
391
+ remedy fails its essential purpose. These exclusions and
392
+ limitations of liability form an essential basis of the
393
+ bargain between the parties, and, absent any of these
394
+ exclusions or limitations of liability, the provisions of this
395
+ Agreement, including, without limitation, the economic terms,
396
+ would be substantially different.
397
+
398
+
399
+ 1.6. Termination
400
+
401
+ 1. This Agreement will continue to apply until terminated by
402
+ either you or NVIDIA as described below.
403
+
404
+ 2. If you want to terminate this Agreement, you may do so by
405
+ stopping use of the SDK.
406
+
407
+ 3. NVIDIA may, at any time, terminate this Agreement if:
408
+
409
+ a. (i) you fail to comply with any term of this
410
+ Agreement and the non-compliance is not fixed within
411
+ thirty (30) days following notice from NVIDIA (or
412
+ immediately if you violate NVIDIA’s intellectual
413
+ property rights);
414
+
415
+ b. (ii) you commence or participate in any legal
416
+ proceeding against NVIDIA with respect to the SDK; or
417
+
418
+ c. (iii) NVIDIA decides to no longer provide the SDK in
419
+ a country or, in NVIDIA’s sole discretion, the
420
+ continued use of it is no longer commercially viable.
421
+
422
+ 4. Upon any termination of this Agreement, you agree to
423
+ promptly discontinue use of the SDK and destroy all copies
424
+ in your possession or control. Your prior distributions in
425
+ accordance with this Agreement are not affected by the
426
+ termination of this Agreement. Upon written request, you
427
+ will certify in writing that you have complied with your
428
+ commitments under this section. Upon any termination of
429
+ this Agreement all provisions survive except for the
430
+ license grant provisions.
431
+
432
+
433
+ 1.7. General
434
+
435
+ If you wish to assign this Agreement or your rights and
436
+ obligations, including by merger, consolidation, dissolution
437
+ or operation of law, contact NVIDIA to ask for permission. Any
438
+ attempted assignment not approved by NVIDIA in writing shall
439
+ be void and of no effect. NVIDIA may assign, delegate or
440
+ transfer this Agreement and its rights and obligations, and if
441
+ to a non-affiliate you will be notified.
442
+
443
+ You agree to cooperate with NVIDIA and provide reasonably
444
+ requested information to verify your compliance with this
445
+ Agreement.
446
+
447
+ This Agreement will be governed in all respects by the laws of
448
+ the United States and of the State of Delaware as those laws
449
+ are applied to contracts entered into and performed entirely
450
+ within Delaware by Delaware residents, without regard to the
451
+ conflicts of laws principles. The United Nations Convention on
452
+ Contracts for the International Sale of Goods is specifically
453
+ disclaimed. You agree to all terms of this Agreement in the
454
+ English language.
455
+
456
+ The state or federal courts residing in Santa Clara County,
457
+ California shall have exclusive jurisdiction over any dispute
458
+ or claim arising out of this Agreement. Notwithstanding this,
459
+ you agree that NVIDIA shall still be allowed to apply for
460
+ injunctive remedies or an equivalent type of urgent legal
461
+ relief in any jurisdiction.
462
+
463
+ If any court of competent jurisdiction determines that any
464
+ provision of this Agreement is illegal, invalid or
465
+ unenforceable, such provision will be construed as limited to
466
+ the extent necessary to be consistent with and fully
467
+ enforceable under the law and the remaining provisions will
468
+ remain in full force and effect. Unless otherwise specified,
469
+ remedies are cumulative.
470
+
471
+ Each party acknowledges and agrees that the other is an
472
+ independent contractor in the performance of this Agreement.
473
+
474
+ The SDK has been developed entirely at private expense and is
475
+ “commercial items” consisting of “commercial computer
476
+ software” and “commercial computer software
477
+ documentation” provided with RESTRICTED RIGHTS. Use,
478
+ duplication or disclosure by the U.S. Government or a U.S.
479
+ Government subcontractor is subject to the restrictions in
480
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
481
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
482
+ Software - Restricted Rights clause at FAR 52.227-19, as
483
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
484
+ Expressway, Santa Clara, CA 95051.
485
+
486
+ The SDK is subject to United States export laws and
487
+ regulations. You agree that you will not ship, transfer or
488
+ export the SDK into any country, or use the SDK in any manner,
489
+ prohibited by the United States Bureau of Industry and
490
+ Security or economic sanctions regulations administered by the
491
+ U.S. Department of Treasury’s Office of Foreign Assets
492
+ Control (OFAC), or any applicable export laws, restrictions or
493
+ regulations. These laws include restrictions on destinations,
494
+ end users and end use. By accepting this Agreement, you
495
+ confirm that you are not a resident or citizen of any country
496
+ currently embargoed by the U.S. and that you are not otherwise
497
+ prohibited from receiving the SDK.
498
+
499
+ Any notice delivered by NVIDIA to you under this Agreement
500
+ will be delivered via mail, email or fax. You agree that any
501
+ notices that NVIDIA sends you electronically will satisfy any
502
+ legal communication requirements. Please direct your legal
503
+ notices or other correspondence to NVIDIA Corporation, 2788
504
+ San Tomas Expressway, Santa Clara, California 95051, United
505
+ States of America, Attention: Legal Department.
506
+
507
+ This Agreement and any exhibits incorporated into this
508
+ Agreement constitute the entire agreement of the parties with
509
+ respect to the subject matter of this Agreement and supersede
510
+ all prior negotiations or documentation exchanged between the
511
+ parties relating to this SDK license. Any additional and/or
512
+ conflicting terms on documents issued by you are null, void,
513
+ and invalid. Any amendment or waiver under this Agreement
514
+ shall be in writing and signed by representatives of both
515
+ parties.
516
+
517
+
518
+ 2. CUDA Toolkit Supplement to Software License Agreement for
519
+ NVIDIA Software Development Kits
520
+ ------------------------------------------------------------
521
+
522
+
523
+ Release date: August 16, 2018
524
+ -----------------------------
525
+
526
+ The terms in this supplement govern your use of the NVIDIA
527
+ CUDA Toolkit SDK under the terms of your license agreement
528
+ (“Agreement”) as modified by this supplement. Capitalized
529
+ terms used but not defined below have the meaning assigned to
530
+ them in the Agreement.
531
+
532
+ This supplement is an exhibit to the Agreement and is
533
+ incorporated as an integral part of the Agreement. In the
534
+ event of conflict between the terms in this supplement and the
535
+ terms in the Agreement, the terms in this supplement govern.
536
+
537
+
538
+ 2.1. License Scope
539
+
540
+ The SDK is licensed for you to develop applications only for
541
+ use in systems with NVIDIA GPUs.
542
+
543
+
544
+ 2.2. Distribution
545
+
546
+ The portions of the SDK that are distributable under the
547
+ Agreement are listed in Attachment A.
548
+
549
+
550
+ 2.3. Operating Systems
551
+
552
+ Those portions of the SDK designed exclusively for use on the
553
+ Linux or FreeBSD operating systems, or other operating systems
554
+ derived from the source code to these operating systems, may
555
+ be copied and redistributed for use in accordance with this
556
+ Agreement, provided that the object code files are not
557
+ modified in any way (except for unzipping of compressed
558
+ files).
559
+
560
+
561
+ 2.4. Audio and Video Encoders and Decoders
562
+
563
+ You acknowledge and agree that it is your sole responsibility
564
+ to obtain any additional third-party licenses required to
565
+ make, have made, use, have used, sell, import, and offer for
566
+ sale your products or services that include or incorporate any
567
+ third-party software and content relating to audio and/or
568
+ video encoders and decoders from, including but not limited
569
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
570
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
571
+ under this Agreement any necessary patent or other rights with
572
+ respect to any audio and/or video encoders and decoders.
573
+
574
+
575
+ 2.5. Licensing
576
+
577
+ If the distribution terms in this Agreement are not suitable
578
+ for your organization, or for any questions regarding this
579
+ Agreement, please contact NVIDIA at
580
581
+
582
+
583
+ 2.6. Attachment A
584
+
585
+ The following portions of the SDK are distributable under the
586
+ Agreement:
587
+
588
+ Component
589
+
590
+ CUDA Runtime
591
+
592
+ Windows
593
+
594
+ cudart.dll, cudart_static.lib, cudadevrt.lib
595
+
596
+ Mac OSX
597
+
598
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
599
+
600
+ Linux
601
+
602
+ libcudart.so, libcudart_static.a, libcudadevrt.a
603
+
604
+ Android
605
+
606
+ libcudart.so, libcudart_static.a, libcudadevrt.a
607
+
608
+ Component
609
+
610
+ CUDA FFT Library
611
+
612
+ Windows
613
+
614
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
615
+
616
+ Mac OSX
617
+
618
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
619
+ libcufftw_static.a
620
+
621
+ Linux
622
+
623
+ libcufft.so, libcufft_static.a, libcufftw.so,
624
+ libcufftw_static.a
625
+
626
+ Android
627
+
628
+ libcufft.so, libcufft_static.a, libcufftw.so,
629
+ libcufftw_static.a
630
+
631
+ Component
632
+
633
+ CUDA BLAS Library
634
+
635
+ Windows
636
+
637
+ cublas.dll, cublasLt.dll
638
+
639
+ Mac OSX
640
+
641
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
642
+ libcublasLt_static.a
643
+
644
+ Linux
645
+
646
+ libcublas.so, libcublasLt.so, libcublas_static.a,
647
+ libcublasLt_static.a
648
+
649
+ Android
650
+
651
+ libcublas.so, libcublasLt.so, libcublas_static.a,
652
+ libcublasLt_static.a
653
+
654
+ Component
655
+
656
+ NVIDIA "Drop-in" BLAS Library
657
+
658
+ Windows
659
+
660
+ nvblas.dll
661
+
662
+ Mac OSX
663
+
664
+ libnvblas.dylib
665
+
666
+ Linux
667
+
668
+ libnvblas.so
669
+
670
+ Component
671
+
672
+ CUDA Sparse Matrix Library
673
+
674
+ Windows
675
+
676
+ cusparse.dll, cusparse.lib
677
+
678
+ Mac OSX
679
+
680
+ libcusparse.dylib, libcusparse_static.a
681
+
682
+ Linux
683
+
684
+ libcusparse.so, libcusparse_static.a
685
+
686
+ Android
687
+
688
+ libcusparse.so, libcusparse_static.a
689
+
690
+ Component
691
+
692
+ CUDA Linear Solver Library
693
+
694
+ Windows
695
+
696
+ cusolver.dll, cusolver.lib
697
+
698
+ Mac OSX
699
+
700
+ libcusolver.dylib, libcusolver_static.a
701
+
702
+ Linux
703
+
704
+ libcusolver.so, libcusolver_static.a
705
+
706
+ Android
707
+
708
+ libcusolver.so, libcusolver_static.a
709
+
710
+ Component
711
+
712
+ CUDA Random Number Generation Library
713
+
714
+ Windows
715
+
716
+ curand.dll, curand.lib
717
+
718
+ Mac OSX
719
+
720
+ libcurand.dylib, libcurand_static.a
721
+
722
+ Linux
723
+
724
+ libcurand.so, libcurand_static.a
725
+
726
+ Android
727
+
728
+ libcurand.so, libcurand_static.a
729
+
730
+ Component
731
+
732
+ CUDA Accelerated Graph Library
733
+
734
+ Component
735
+
736
+ NVIDIA Performance Primitives Library
737
+
738
+ Windows
739
+
740
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
741
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
742
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
743
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
744
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
745
+
746
+ Mac OSX
747
+
748
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
749
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
750
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
751
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
752
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
753
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
754
+ libnpps.dylib, libnpps_static.a
755
+
756
+ Linux
757
+
758
+ libnppc.so, libnppc_static.a, libnppial.so,
759
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
760
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
761
+ libnppidei_static.a, libnppif.so, libnppif_static.a,
762
+ libnppig.so, libnppig_static.a, libnppim.so,
763
+ libnppim_static.a, libnppist.so, libnppist_static.a,
764
+ libnppisu.so, libnppisu_static.a, libnppitc.so,
765
+ libnppitc_static.a, libnpps.so, libnpps_static.a
766
+
767
+ Android
768
+
769
+ libnppc.so, libnppc_static.a, libnppial.so,
770
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
771
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
772
+ libnppidei_static.a, libnppif.so, libnppif_static.a,
773
+ libnppig.so, libnppig_static.a, libnppim.so,
774
+ libnppim_static.a, libnppist.so, libnppist_static.a,
775
+ libnppisu.so, libnppisu_static.a, libnppitc.so,
776
+ libnppitc_static.a, libnpps.so, libnpps_static.a
777
+
778
+ Component
779
+
780
+ NVIDIA JPEG Library
781
+
782
+ Linux
783
+
784
+ libnvjpeg.so, libnvjpeg_static.a
785
+
786
+ Component
787
+
788
+ Internal common library required for statically linking to
789
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
790
+
791
+ Mac OSX
792
+
793
+ libculibos.a
794
+
795
+ Linux
796
+
797
+ libculibos.a
798
+
799
+ Component
800
+
801
+ NVIDIA Runtime Compilation Library and Header
802
+
803
+ All
804
+
805
+ nvrtc.h
806
+
807
+ Windows
808
+
809
+ nvrtc.dll, nvrtc-builtins.dll
810
+
811
+ Mac OSX
812
+
813
+ libnvrtc.dylib, libnvrtc-builtins.dylib
814
+
815
+ Linux
816
+
817
+ libnvrtc.so, libnvrtc-builtins.so
818
+
819
+ Component
820
+
821
+ NVIDIA Optimizing Compiler Library
822
+
823
+ Windows
824
+
825
+ nvvm.dll
826
+
827
+ Mac OSX
828
+
829
+ libnvvm.dylib
830
+
831
+ Linux
832
+
833
+ libnvvm.so
834
+
835
+ Component
836
+
837
+ NVIDIA Common Device Math Functions Library
838
+
839
+ Windows
840
+
841
+ libdevice.10.bc
842
+
843
+ Mac OSX
844
+
845
+ libdevice.10.bc
846
+
847
+ Linux
848
+
849
+ libdevice.10.bc
850
+
851
+ Component
852
+
853
+ CUDA Occupancy Calculation Header Library
854
+
855
+ All
856
+
857
+ cuda_occupancy.h
858
+
859
+ Component
860
+
861
+ CUDA Half Precision Headers
862
+
863
+ All
864
+
865
+ cuda_fp16.h, cuda_fp16.hpp
866
+
867
+ Component
868
+
869
+ CUDA Profiling Tools Interface (CUPTI) Library
870
+
871
+ Windows
872
+
873
+ cupti.dll
874
+
875
+ Mac OSX
876
+
877
+ libcupti.dylib
878
+
879
+ Linux
880
+
881
+ libcupti.so
882
+
883
+ Component
884
+
885
+ NVIDIA Tools Extension Library
886
+
887
+ Windows
888
+
889
+ nvToolsExt.dll, nvToolsExt.lib
890
+
891
+ Mac OSX
892
+
893
+ libnvToolsExt.dylib
894
+
895
+ Linux
896
+
897
+ libnvToolsExt.so
898
+
899
+ Component
900
+
901
+ NVIDIA CUDA Driver Libraries
902
+
903
+ Linux
904
+
905
+ libcuda.so, libnvidia-fatbinaryloader.so,
906
+ libnvidia-ptxjitcompiler.so
907
+
908
+ The NVIDIA CUDA Driver Libraries are only distributable in
909
+ applications that meet these criteria:
910
+
911
+ 1. The application was developed starting from a NVIDIA CUDA
912
+ container obtained from Docker Hub or the NVIDIA GPU
913
+ Cloud, and
914
+
915
+ 2. The resulting application is packaged as a Docker
916
+ container and distributed to users on Docker Hub or the
917
+ NVIDIA GPU Cloud only.
918
+
919
+
920
+ 2.7. Attachment B
921
+
922
+
923
+ Additional Licensing Obligations
924
+
925
+ The following third party components included in the SOFTWARE
926
+ are licensed to Licensee pursuant to the following terms and
927
+ conditions:
928
+
929
+ 1. Licensee's use of the GDB third party component is
930
+ subject to the terms and conditions of GNU GPL v3:
931
+
932
+ This product includes copyrighted third-party software licensed
933
+ under the terms of the GNU General Public License v3 ("GPL v3").
934
+ All third-party software packages are copyright by their respective
935
+ authors. GPL v3 terms and conditions are hereby incorporated into
936
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
937
+
938
+ Consistent with these licensing requirements, the software
939
+ listed below is provided under the terms of the specified
940
+ open source software licenses. To obtain source code for
941
+ software provided under licenses that require
942
+ redistribution of source code, including the GNU General
943
+ Public License (GPL) and GNU Lesser General Public License
944
+ (LGPL), contact [email protected]. This offer is
945
+ valid for a period of three (3) years from the date of the
946
+ distribution of this product by NVIDIA CORPORATION.
947
+
948
+ Component License
949
+ CUDA-GDB GPL v3
950
+
951
+ 2. Licensee represents and warrants that any and all third
952
+ party licensing and/or royalty payment obligations in
953
+ connection with Licensee's use of the H.264 video codecs
954
+ are solely the responsibility of Licensee.
955
+
956
+ 3. Licensee's use of the Thrust library is subject to the
957
+ terms and conditions of the Apache License Version 2.0.
958
+ All third-party software packages are copyright by their
959
+ respective authors. Apache License Version 2.0 terms and
960
+ conditions are hereby incorporated into the Agreement by
961
+ this reference.
962
+ http://www.apache.org/licenses/LICENSE-2.0.html
963
+
964
+ In addition, Licensee acknowledges the following notice:
965
+ Thrust includes source code from the Boost Iterator,
966
+ Tuple, System, and Random Number libraries.
967
+
968
+ Boost Software License - Version 1.0 - August 17th, 2003
969
+ . . . .
970
+
971
+ Permission is hereby granted, free of charge, to any person or
972
+ organization obtaining a copy of the software and accompanying
973
+ documentation covered by this license (the "Software") to use,
974
+ reproduce, display, distribute, execute, and transmit the Software,
975
+ and to prepare derivative works of the Software, and to permit
976
+ third-parties to whom the Software is furnished to do so, all
977
+ subject to the following:
978
+
979
+ The copyright notices in the Software and this entire statement,
980
+ including the above license grant, this restriction and the following
981
+ disclaimer, must be included in all copies of the Software, in whole
982
+ or in part, and all derivative works of the Software, unless such
983
+ copies or derivative works are solely in the form of machine-executable
984
+ object code generated by a source language processor.
985
+
986
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
987
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
988
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
989
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
990
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
991
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
992
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
993
+ OTHER DEALINGS IN THE SOFTWARE.
994
+
995
+ 4. Licensee's use of the LLVM third party component is
996
+ subject to the following terms and conditions:
997
+
998
+ ======================================================
999
+ LLVM Release License
1000
+ ======================================================
1001
+ University of Illinois/NCSA
1002
+ Open Source License
1003
+
1004
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
1005
+ All rights reserved.
1006
+
1007
+ Developed by:
1008
+
1009
+ LLVM Team
1010
+
1011
+ University of Illinois at Urbana-Champaign
1012
+
1013
+ http://llvm.org
1014
+
1015
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1016
+ of this software and associated documentation files (the "Software"), to
1017
+ deal with the Software without restriction, including without limitation the
1018
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1019
+ sell copies of the Software, and to permit persons to whom the Software is
1020
+ furnished to do so, subject to the following conditions:
1021
+
1022
+ * Redistributions of source code must retain the above copyright notice,
1023
+ this list of conditions and the following disclaimers.
1024
+
1025
+ * Redistributions in binary form must reproduce the above copyright
1026
+ notice, this list of conditions and the following disclaimers in the
1027
+ documentation and/or other materials provided with the distribution.
1028
+
1029
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
1030
+ Champaign, nor the names of its contributors may be used to endorse or
1031
+ promote products derived from this Software without specific prior
1032
+ written permission.
1033
+
1034
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1035
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1036
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1037
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1038
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1039
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1040
+ DEALINGS WITH THE SOFTWARE.
1041
+
1042
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
1043
+ component is subject to the following terms and
1044
+ conditions:
1045
+
1046
+ ------------
1047
+ PCRE LICENCE
1048
+ ------------
1049
+ PCRE is a library of functions to support regular expressions whose syntax
1050
+ and semantics are as close as possible to those of the Perl 5 language.
1051
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
1052
+ specified below. The documentation for PCRE, supplied in the "doc"
1053
+ directory, is distributed under the same terms as the software itself. The
1054
+ basic library functions are written in C and are freestanding. Also
1055
+ included in the distribution is a set of C++ wrapper functions, and a just-
1056
+ in-time compiler that can be used to optimize pattern matching. These are
1057
+ both optional features that can be omitted when the library is built.
1058
+
1059
+ THE BASIC LIBRARY FUNCTIONS
1060
+ ---------------------------
1061
+ Written by: Philip Hazel
1062
+ Email local part: ph10
1063
+ Email domain: cam.ac.uk
1064
+ University of Cambridge Computing Service,
1065
+ Cambridge, England.
1066
+ Copyright (c) 1997-2012 University of Cambridge
1067
+ All rights reserved.
1068
+
1069
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
1070
+ -------------------------------------
1071
+ Written by: Zoltan Herczeg
1072
+ Email local part: hzmester
1073
+ Emain domain: freemail.hu
1074
+ Copyright(c) 2010-2012 Zoltan Herczeg
1075
+ All rights reserved.
1076
+
1077
+ STACK-LESS JUST-IN-TIME COMPILER
1078
+ --------------------------------
1079
+ Written by: Zoltan Herczeg
1080
+ Email local part: hzmester
1081
+ Emain domain: freemail.hu
1082
+ Copyright(c) 2009-2012 Zoltan Herczeg
1083
+ All rights reserved.
1084
+
1085
+ THE C++ WRAPPER FUNCTIONS
1086
+ -------------------------
1087
+ Contributed by: Google Inc.
1088
+ Copyright (c) 2007-2012, Google Inc.
1089
+ All rights reserved.
1090
+
1091
+ THE "BSD" LICENCE
1092
+ -----------------
1093
+ Redistribution and use in source and binary forms, with or without
1094
+ modification, are permitted provided that the following conditions are met:
1095
+
1096
+ * Redistributions of source code must retain the above copyright notice,
1097
+ this list of conditions and the following disclaimer.
1098
+
1099
+ * Redistributions in binary form must reproduce the above copyright
1100
+ notice, this list of conditions and the following disclaimer in the
1101
+ documentation and/or other materials provided with the distribution.
1102
+
1103
+ * Neither the name of the University of Cambridge nor the name of Google
1104
+ Inc. nor the names of their contributors may be used to endorse or
1105
+ promote products derived from this software without specific prior
1106
+ written permission.
1107
+
1108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1109
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1110
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1111
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
1112
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1113
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1114
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1115
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1116
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1117
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1118
+ POSSIBILITY OF SUCH DAMAGE.
1119
+
1120
+ 6. Some of the cuBLAS library routines were written by or
1121
+ derived from code written by Vasily Volkov and are subject
1122
+ to the Modified Berkeley Software Distribution License as
1123
+ follows:
1124
+
1125
+ Copyright (c) 2007-2009, Regents of the University of California
1126
+
1127
+ All rights reserved.
1128
+
1129
+ Redistribution and use in source and binary forms, with or without
1130
+ modification, are permitted provided that the following conditions are
1131
+ met:
1132
+ * Redistributions of source code must retain the above copyright
1133
+ notice, this list of conditions and the following disclaimer.
1134
+ * Redistributions in binary form must reproduce the above
1135
+ copyright notice, this list of conditions and the following
1136
+ disclaimer in the documentation and/or other materials provided
1137
+ with the distribution.
1138
+ * Neither the name of the University of California, Berkeley nor
1139
+ the names of its contributors may be used to endorse or promote
1140
+ products derived from this software without specific prior
1141
+ written permission.
1142
+
1143
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1144
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1145
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1146
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1147
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1148
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1149
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1150
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1151
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1152
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1153
+ POSSIBILITY OF SUCH DAMAGE.
1154
+
1155
+ 7. Some of the cuBLAS library routines were written by or
1156
+ derived from code written by Davide Barbieri and are
1157
+ subject to the Modified Berkeley Software Distribution
1158
+ License as follows:
1159
+
1160
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
1161
+
1162
+ All rights reserved.
1163
+
1164
+ Redistribution and use in source and binary forms, with or without
1165
+ modification, are permitted provided that the following conditions are
1166
+ met:
1167
+ * Redistributions of source code must retain the above copyright
1168
+ notice, this list of conditions and the following disclaimer.
1169
+ * Redistributions in binary form must reproduce the above
1170
+ copyright notice, this list of conditions and the following
1171
+ disclaimer in the documentation and/or other materials provided
1172
+ with the distribution.
1173
+ * The name of the author may not be used to endorse or promote
1174
+ products derived from this software without specific prior
1175
+ written permission.
1176
+
1177
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1178
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1179
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1180
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1181
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1182
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1183
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1184
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1185
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1186
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1187
+ POSSIBILITY OF SUCH DAMAGE.
1188
+
1189
+ 8. Some of the cuBLAS library routines were derived from
1190
+ code developed by the University of Tennessee and are
1191
+ subject to the Modified Berkeley Software Distribution
1192
+ License as follows:
1193
+
1194
+ Copyright (c) 2010 The University of Tennessee.
1195
+
1196
+ All rights reserved.
1197
+
1198
+ Redistribution and use in source and binary forms, with or without
1199
+ modification, are permitted provided that the following conditions are
1200
+ met:
1201
+ * Redistributions of source code must retain the above copyright
1202
+ notice, this list of conditions and the following disclaimer.
1203
+ * Redistributions in binary form must reproduce the above
1204
+ copyright notice, this list of conditions and the following
1205
+ disclaimer listed in this license in the documentation and/or
1206
+ other materials provided with the distribution.
1207
+ * Neither the name of the copyright holders nor the names of its
1208
+ contributors may be used to endorse or promote products derived
1209
+ from this software without specific prior written permission.
1210
+
1211
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1212
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1213
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1214
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1215
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1216
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1217
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1218
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1219
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1220
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1221
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1222
+
1223
+ 9. Some of the cuBLAS library routines were written by or
1224
+ derived from code written by Jonathan Hogg and are subject
1225
+ to the Modified Berkeley Software Distribution License as
1226
+ follows:
1227
+
1228
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
1229
+
1230
+ All rights reserved.
1231
+
1232
+ Redistribution and use in source and binary forms, with or without
1233
+ modification, are permitted provided that the following conditions are
1234
+ met:
1235
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah ([email protected])
1265
+ David Keyes ([email protected])
1266
+ Hatem Ltaief ([email protected])
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition are subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines use code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from lunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license .
1567
+
1568
+ -----------------
venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/METADATA ADDED
@@ -0,0 +1,35 @@
+ Metadata-Version: 2.1
+ Name: nvidia-cuda-runtime-cu12
+ Version: 12.1.105
+ Summary: CUDA Runtime native Libraries
+ Home-page: https://developer.nvidia.com/cuda-zone
+ Author: Nvidia CUDA Installer Team
+ Author-email: [email protected]
+ License: NVIDIA Proprietary Software
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Education
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: Other/Proprietary License
+ Classifier: Natural Language :: English
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.5
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development
+ Classifier: Topic :: Software Development :: Libraries
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Operating System :: POSIX :: Linux
+ Requires-Python: >=3
+ License-File: License.txt
+
+ CUDA Runtime native Libraries
venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/RECORD ADDED
@@ -0,0 +1,106 @@
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cuda_runtime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cuda_runtime/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cuda_runtime/include/builtin_types.h,sha256=JxT9Vf2q2snxTBOL9ACzNmYzTWACO2VOVUu1KdFt7_g,3150
+ nvidia/cuda_runtime/include/channel_descriptor.h,sha256=no_vNky02LeMLI0CF8GDVGHaPm_uRUGcVUMYdt_Xn4U,21482
+ nvidia/cuda_runtime/include/common_functions.h,sha256=22LTZRVcPZzEH6MJda7nNMCvMgIjSTe0OKR7sEQj6kc,3410
+ nvidia/cuda_runtime/include/cooperative_groups.h,sha256=JCMxtl4cNUFnymguM4_bTywhcfyxGqu_zOQIUh_Tc_g,59328
+ nvidia/cuda_runtime/include/cooperative_groups/details/async.h,sha256=xsEHCZP3nuEY3l2p8SU2d1226XiXumUvDP_Gyh8PdVY,19122
+ nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h,sha256=vWv1tyxMjSMM2Oc0SdxXhCug_PwaBM6u8iMLjKyeqjE,4561
+ nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h,sha256=DfZv5d5W0XJv-tZVhgrIdjLjs6aCx_u0oy1lDIpjo1Q,7314
+ nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h,sha256=v-ZUb4UgGKJk6NR2WCWHD3x_42y-togI1urFn70Gi-g,3964
+ nvidia/cuda_runtime/include/cooperative_groups/details/functional.h,sha256=2BV8i8Bidz0kgxuYkJCAbwFxOIZRyzHgG-c_rVKhRzc,8905
+ nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h,sha256=GHIy-8awPZObSmP_FFWAnX7RQG9s1SD6L3yw5dNzhlM,23569
+ nvidia/cuda_runtime/include/cooperative_groups/details/info.h,sha256=LSrEe6iTHuQRfc7RD3EHQbNqpED8eAbX4HLNyxXgKFA,12286
+ nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h,sha256=Osq3K-tZuXHVCMQJ708PjPo-BwMhjhjApO4b0TYLFJg,8616
+ nvidia/cuda_runtime/include/cooperative_groups/details/memory.h,sha256=WU28eUcYLA1z131VYGulR4eVCSN9xK9KSxbV656YPs0,5484
+ nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h,sha256=8hCh6F8sfkyfRgMirC37Nqv-b-gIY3A_J0eMYqmD2zU,6001
+ nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h,sha256=cbDjVSJVQ2_2pf2aP-X_rBkRVEWEFSYkc6oCx1fRQsQ,22744
+ nvidia/cuda_runtime/include/cooperative_groups/details/scan.h,sha256=-Ttwb2AfEEY_tsmqJjR2dojkPpoRx387SoqxgvfdBtQ,17166
+ nvidia/cuda_runtime/include/cooperative_groups/details/sync.h,sha256=fIEOjj7h3MoPqSOP3rkTNeedgS-0ZRkAHkzTAJDwJUA,10340
+ nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h,sha256=erOIHuObdfxRhBWfrXE3wsZF4B2GUuqwzQrsPwKPpbg,2960
+ nvidia/cuda_runtime/include/cooperative_groups/reduce.h,sha256=B0hgDkqM-6ueqTTgb3b34A0RH4vGz8mBf5e2jT1dJ1o,2949
+ nvidia/cuda_runtime/include/cooperative_groups/scan.h,sha256=2EU6T5cWNwftm2B7FicV31PojoI61yo5fHXGRYkGk40,2940
+ nvidia/cuda_runtime/include/cuComplex.h,sha256=WpcgpaiPhU_o9sTPMcNTEZuyXDIc8x3sz4dUWSztL2g,12186
+ nvidia/cuda_runtime/include/cuda.h,sha256=CAY_j4D9qgEVLDxRftRObCI-vCpTTkSrLq9zBRVNwJI,933009
+ nvidia/cuda_runtime/include/cudaEGL.h,sha256=_CwaQ4cEP1vfNyBSSd5qFxznPCYOovF6Cpj-QWSIBq4,39544
+ nvidia/cuda_runtime/include/cudaEGLTypedefs.h,sha256=xF_FAN1Kar9oyHJ3cCU7jztTpxX8WylpiuYyYpGGHek,5645
+ nvidia/cuda_runtime/include/cudaGL.h,sha256=gMT1HPGa-siuji0gAsKYr4X45Lc29HKglC_ttNSGyUM,22501
+ nvidia/cuda_runtime/include/cudaGLTypedefs.h,sha256=dClpQI-LuXgF9rPSBsj7OkIg8g_fXDjT0hLZS8TGpOg,6576
+ nvidia/cuda_runtime/include/cudaProfilerTypedefs.h,sha256=F2aWLIKv_AhNbxNOaZVcRsxIh0kuscnV8UMWWxkBAlY,3297
+ nvidia/cuda_runtime/include/cudaTypedefs.h,sha256=guPSBrr4jNU6WBaBCqRiTuCbOYQDLMqYoz7bemoofWI,101855
+ nvidia/cuda_runtime/include/cudaVDPAU.h,sha256=Np7Nc2Wjaz--hkpbhW6f9aapr-NbcPDAgkot0sJerco,12694
+ nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h,sha256=wz8nyOUdwM9mH9JO3QZW-A9dyxt-IufSX7nggSXpCNs,4144
+ nvidia/cuda_runtime/include/cuda_awbarrier.h,sha256=3ZH-ZlXODhSiwSY9rqSni_EQwi25QMHP6Tm-zOdxBwE,9340
+ nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h,sha256=OCskCts5bCKl_RKBe9M74zKSIsVpePn44S_aJp1tFXE,12489
+ nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h,sha256=n5__E1jYYDhlgH-f3u8MQjtz57UZ7v5VshhMye1eicM,4699
+ nvidia/cuda_runtime/include/cuda_bf16.h,sha256=zQ2idK7w0w7tRlgL_5Nyvy3FSrsOnu2W9Ya2YSFFT-E,149245
+ nvidia/cuda_runtime/include/cuda_bf16.hpp,sha256=JlKs0yOZvJQPM6yFO3klxrhPTNN83e7um3ZFHMlYOKI,104876
+ nvidia/cuda_runtime/include/cuda_device_runtime_api.h,sha256=2ZuNnXkRON3VchHM-OcKCdhljApIlfh-xKwupEfp5N4,39755
+ nvidia/cuda_runtime/include/cuda_egl_interop.h,sha256=PNWYns30MIytJQHSOh7UbZYlaTX5e0bavzK14tde_C8,37109
+ nvidia/cuda_runtime/include/cuda_fp16.h,sha256=aFLciB3o9QH3trYFl_P_dX58342UPfHXZGy2OeqEv1s,141782
+ nvidia/cuda_runtime/include/cuda_fp16.hpp,sha256=kyHQA_rvZWpGq7sb2k3iTT3Zs1KYtr7TlEVvkGbQ61E,98606
+ nvidia/cuda_runtime/include/cuda_fp8.h,sha256=Q3OP5o_3rSYbKtVIlcXVr_CncU3SPM-09j605e2Zegw,13833
+ nvidia/cuda_runtime/include/cuda_fp8.hpp,sha256=pgYF_hzC2uAr7KNVyxBqrHTuM2bMaUPMUj7cY0kG3OU,56491
+ nvidia/cuda_runtime/include/cuda_gl_interop.h,sha256=VQEswFeOBF6JN6Q0pdlkvc5WT7bD1FnTfKewvANulCc,19150
+ nvidia/cuda_runtime/include/cuda_occupancy.h,sha256=Kr9HyOe-hlRjBAzbINwUYkNgbbIgIjuvKs09UZhMYQo,67179
+ nvidia/cuda_runtime/include/cuda_pipeline.h,sha256=0enXG49wN4JajlQi3ahbp2ei_ufTY_Mznic7zfWmKHM,8130
+ nvidia/cuda_runtime/include/cuda_pipeline_helpers.h,sha256=bo1L7e6vCuM-K3Il8K1z4wJUja5DyXQKdo_hSWUME-E,13852
+ nvidia/cuda_runtime/include/cuda_pipeline_primitives.h,sha256=FnJJtuV6rHr6LgL56XDwilcSbFr6W1Hj6mf1AJaMI20,8675
+ nvidia/cuda_runtime/include/cuda_runtime.h,sha256=NKUshOJapRWSe0CPJx-KllF9y3ZibUd9bM1OVUU52H4,88281
+ nvidia/cuda_runtime/include/cuda_runtime_api.h,sha256=rUQw7deoB1R5LKEgLKwqFdoX6eke4bcp98CJ6iCk_uk,560622
+ nvidia/cuda_runtime/include/cuda_surface_types.h,sha256=Mw5Lo4b8Q-f9mogOvATGyHhu9d2t2K6XOxuqtZrSh3A,3688
+ nvidia/cuda_runtime/include/cuda_texture_types.h,sha256=ITbX-JNnP7Rm-JSgNVdJ9pq6k8FVor8RbnruDsKq6sk,3688
+ nvidia/cuda_runtime/include/cuda_vdpau_interop.h,sha256=bXQanWc2IFXZAKWNGl2xAz9nLvFmQpWyGrsDvfeS9FA,7727
+ nvidia/cuda_runtime/include/cudart_platform.h,sha256=YN6sKhB0b9w5tGX1IYL7ulJVPrWAiX9A44qLv4EtW5Q,2717
+ nvidia/cuda_runtime/include/device_atomic_functions.h,sha256=o448l6Ep35UHnqcPSQXICvK4Vusc9mVjkyQDq0vV14E,11883
+ nvidia/cuda_runtime/include/device_atomic_functions.hpp,sha256=_UsoVsyP7U-9CUUCbC1QLw6IbFFkKzxk458vLbAXzOY,8149
+ nvidia/cuda_runtime/include/device_double_functions.h,sha256=KUxId5Z1fx8SWfLRTxPD7RB-zN7zslzb4n7JaJLfL3I,3452
+ nvidia/cuda_runtime/include/device_functions.h,sha256=bWSrhTYE9NQlss7xMSMEVusvto9j2fgUDXWVH2W_cOA,3410
+ nvidia/cuda_runtime/include/device_launch_parameters.h,sha256=H1_CC-vvAaS26ys4XsTFkMgTxUTciAjdjswjizkisvQ,3846
+ nvidia/cuda_runtime/include/device_types.h,sha256=2LFxoZBJPoA5V0H1EbKTEaXDi3GDJPtzOPdRHDaucIQ,3588
+ nvidia/cuda_runtime/include/driver_functions.h,sha256=cN3IjRAz2Mj2Pj35SyxJIkZNDDusnJqaqzBdMzpQKbA,4625
+ nvidia/cuda_runtime/include/driver_types.h,sha256=Oti6YeNU-DHsXp6r1wu5JSIGOUWgCXiED-N0DEWVlK0,144785
+ nvidia/cuda_runtime/include/host_config.h,sha256=BscH_GazAZbbotddVzL5RmafbQ-QjRx8f-I1O01IBW8,3380
+ nvidia/cuda_runtime/include/host_defines.h,sha256=bBQwQF5C1N1c2qpLV56g1c-weu9Ysgz-gIf2Kn3uz_A,3386
+ nvidia/cuda_runtime/include/library_types.h,sha256=yJvoLFw5oBdRqkQgEhIaX-stsMGlxQW9sZoJ4vbQHwI,4766
+ nvidia/cuda_runtime/include/math_constants.h,sha256=cV6hAyQe8X7f7MBtaKjjIJq3BycOUDp6I5cizJX5HLw,7608
+ nvidia/cuda_runtime/include/math_functions.h,sha256=5XcC6j-fJKttvhwc4hZNoLHNw808a2ZYIOtZ7ry7yd0,3398
+ nvidia/cuda_runtime/include/mma.h,sha256=IY_VenxuEncwGq92MhrWUb-Xswh0ekAXLy9Rbxhxa2Y,2932
+ nvidia/cuda_runtime/include/sm_20_atomic_functions.h,sha256=j5zuwIb71KmDLf43RoOjwiudGYvBk_k2PRsj8sy5xXI,4942
+ nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp,sha256=Cx__BPJKUPeG5qMxZs9ztfIyqWqt0wZDZi4V_5EV4LQ,3929
+ nvidia/cuda_runtime/include/sm_20_intrinsics.h,sha256=cQbeg-K9zWgOI4jAVeUmV1WiWOMF5sHPz_nb3CWdAjU,51052
+ nvidia/cuda_runtime/include/sm_20_intrinsics.hpp,sha256=BhEBuXSKBsNGJDBJDtYL0cGRI3wX_w_OIgA5D-YxIWk,7694
+ nvidia/cuda_runtime/include/sm_30_intrinsics.h,sha256=kafRv2e_iMvwNfGEP5yIyjRBFx97tdkpT5me9RvbOuo,16375
+ nvidia/cuda_runtime/include/sm_30_intrinsics.hpp,sha256=yX0ebd265tJ-BDhvluP2BhadPuWXpRZPI2eeQFFt5ys,24567
+ nvidia/cuda_runtime/include/sm_32_atomic_functions.h,sha256=V1VteWKbW09qoItfQp0DbHj7R_e3bxX24NRGnd18Jc4,6812
+ nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp,sha256=HcKoB3ujG_AVTzIaD_MjRCaaRZL8khqI_cJqHwCaP5g,5416
+ nvidia/cuda_runtime/include/sm_32_intrinsics.h,sha256=o7IwBBKu2lDZwzHHb2pOLAvyCNpCoEKSHPt0dFaSspI,33390
+ nvidia/cuda_runtime/include/sm_32_intrinsics.hpp,sha256=Gl8aSLDLcit4W3pKQS19GsDG8RYcwD65HwYB_CeZe8M,70616
+ nvidia/cuda_runtime/include/sm_35_atomic_functions.h,sha256=a3XoEsKRCEOf0Q_5Y__rMfmC4pScv4VkUggVgVJVn44,2909
+ nvidia/cuda_runtime/include/sm_35_intrinsics.h,sha256=BEiPNO03ZSv5XtMMul5jiTH4oLWlOu3CYkIAgrWslnk,2952
+ nvidia/cuda_runtime/include/sm_60_atomic_functions.h,sha256=E5nwZxyIL48AMUIFxZmwzfWaPXOMpjJsoEIQcY7LzPM,20902
+ nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp,sha256=bSnj2_G8asEbiu8aPuf3OACDuT_-kw6TuBlU1QtLLfY,15081
+ nvidia/cuda_runtime/include/sm_61_intrinsics.h,sha256=eEL9MmGSOpD9DohErXPflc0k2loEcMzDVKZYiUZx7hY,6030
+ nvidia/cuda_runtime/include/sm_61_intrinsics.hpp,sha256=N-nQvcBsPMT2Umy5zR69c9K1q366W-Jqe7NpoLTqTmg,6787
+ nvidia/cuda_runtime/include/surface_functions.h,sha256=b1O82SAvEgWWxA9uZTWQcGimzZUoem2QbAET3wh3fZc,6782
+ nvidia/cuda_runtime/include/surface_indirect_functions.h,sha256=vy9QuFVV-ezZP-x2RT9RLp2qIUgdngACOCmalSfVFPA,10877
+ nvidia/cuda_runtime/include/surface_types.h,sha256=Di766cyRUqNN4JkOnYM3teFqrwMZ02hXMDB_R_2_vz4,4460
+ nvidia/cuda_runtime/include/texture_fetch_functions.h,sha256=KLCmUxf5aY5_UalX8tSFB6e4TrjA8hyUPxLOkMFltAo,12468
+ nvidia/cuda_runtime/include/texture_indirect_functions.h,sha256=lH_y3Ni-hq4RZ0_PMFbBM0th5-OmTn3TtqtpkHHhA8w,21163
+ nvidia/cuda_runtime/include/texture_types.h,sha256=cFqQ6sC4y79Q6YxjLSY_bknwMgKJAOwPdKDARLPFrDI,6290
+ nvidia/cuda_runtime/include/vector_functions.h,sha256=R5plWOkFciltO_AS5if8NcmsgDp3cFNq6zFFDd3oofk,7847
+ nvidia/cuda_runtime/include/vector_functions.hpp,sha256=afXhNSd3LFTZo96EPtesTLfvxd4nTmLVzgkj967rTRg,10060
+ nvidia/cuda_runtime/include/vector_types.h,sha256=ruVFRp8RioWR9mrvLXX9S15ZSJ97wqTjA8ORCJKKzOQ,13206
+ nvidia/cuda_runtime/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nvidia/cuda_runtime/lib/__pycache__/__init__.cpython-310.pyc,,
+ nvidia/cuda_runtime/lib/libcudart.so.12,sha256=kzX2opypEBDi2p9A6C-0so46SuIv04XhKT6TvzxGyeY,679264
+ nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ nvidia_cuda_runtime_cu12-12.1.105.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+ nvidia_cuda_runtime_cu12-12.1.105.dist-info/METADATA,sha256=9_P2cqjEZ020xhA9HzY1klL7YVuIjKETxs1wKAXeYoQ,1507
+ nvidia_cuda_runtime_cu12-12.1.105.dist-info/RECORD,,
+ nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+ nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.37.1)
+ Root-Is-Purelib: true
+ Tag: py3-none-manylinux1_x86_64
+
venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ nvidia