diff --git a/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6bc0fa35b0de3d56a2b56cf9d36074017eafa26d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edbacfbc93504fa7e7eb8b523eff34505a29340fab99683b37bf3f889aafa6d5
+size 9387
diff --git a/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d1bb3c9f0a79e42a1acba1f09b451706efa13e40
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67fad17970d044aa639f8e857152bbe14562fbd3507b8c88716266925c0a12f0
+size 9372
diff --git a/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ebaca26fcb8f03cc8010921366513e4df1a247ac
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/21.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc68041cccb216325c93aae78bc6679c7030ee3664d2e32f0c8c6391a3c04eff
+size 9387
diff --git a/ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7dd3b60bde877013d51c9fabfcbc6687a1cc57d4
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28e584416eb56d9c15050bd85584fcf2a7bc973a61f8e06c0f79c8e5355fccc2
+size 9372
diff --git a/ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fa9dce4441c11dfd19162b3525b9aaefbed67e44
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7911a1ed49b9d3ac5d5b893e0f5d625f50a577e1bd2f6369accbac09e199014f
+size 9387
diff --git a/ckpts/universal/global_step40/zero/24.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/24.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7b69f80afa70e3396a41f8d995dff00876b31cc3
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2eca0475f0c33ca316333e25aed82a0fd9411039108a6630a0c789539f2fc36a
+size 9293
diff --git a/ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9b02dd0680bb6568659920590292b71b8225917a
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0dc0e99d763151d6e6c35ac66b4f6becc058242ba2d0b2740993cb004ae7fc3
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a4a92899c7b3d5d9ac34bae8e1e8c167847b7a8b
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ccde16d632556998808de4eec1b7bfd6cadd91b3d420548b42a0e807736f59d
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..25c3daefc40106e4d05b43064595bd7490990787
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe041cea5c5538c8e92e49f1b1fb2b40d287bf000e69881b4342dae9ce870f2b
+size 9293
diff --git a/venv/lib/python3.10/site-packages/accelerate/__init__.py b/venv/lib/python3.10/site-packages/accelerate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d596762c554a4f07598401d2b1b22c46250dcb55
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/__init__.py
@@ -0,0 +1,48 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+__version__ = "0.29.3"
+
+from .accelerator import Accelerator
+from .big_modeling import (
+ cpu_offload,
+ cpu_offload_with_hook,
+ disk_offload,
+ dispatch_model,
+ init_empty_weights,
+ init_on_device,
+ load_checkpoint_and_dispatch,
+)
+from .data_loader import skip_first_batches
+from .inference import prepare_pippy
+from .launchers import debug_launcher, notebook_launcher
+from .state import PartialState
+from .utils import (
+ AutocastKwargs,
+ DataLoaderConfiguration,
+ DeepSpeedPlugin,
+ DistributedDataParallelKwargs,
+ DistributedType,
+ FullyShardedDataParallelPlugin,
+ GradScalerKwargs,
+ InitProcessGroupKwargs,
+ find_executable_batch_size,
+ infer_auto_device_map,
+ is_rich_available,
+ load_checkpoint_in_model,
+ synchronize_rng_states,
+)
+
+
+if is_rich_available():
+ from .utils import rich
diff --git a/venv/lib/python3.10/site-packages/accelerate/accelerator.py b/venv/lib/python3.10/site-packages/accelerate/accelerator.py
new file mode 100644
index 0000000000000000000000000000000000000000..4786946c6da68e12a29b5f3cd799d40e6af49c4a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/accelerator.py
@@ -0,0 +1,3259 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import contextlib
+import functools
+import json
+import math
+import os
+import re
+import shutil
+import sys
+import warnings
+from collections import OrderedDict
+from contextlib import contextmanager
+from functools import partial
+from types import MethodType
+from typing import Any, Callable, Union
+
+import torch
+import torch.utils.hooks as hooks
+
+from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
+from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
+from .hooks import AlignDevicesHook
+from .logging import get_logger
+from .optimizer import AcceleratedOptimizer
+from .scheduler import AcceleratedScheduler
+from .state import AcceleratorState, GradientState, PartialState
+from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers
+from .utils import (
+ MODEL_NAME,
+ SAFE_WEIGHTS_INDEX_NAME,
+ SAFE_WEIGHTS_NAME,
+ WEIGHTS_INDEX_NAME,
+ WEIGHTS_NAME,
+ AutocastKwargs,
+ DataLoaderConfiguration,
+ DeepSpeedPlugin,
+ DistributedDataParallelKwargs,
+ DistributedType,
+ DynamoBackend,
+ FP8RecipeKwargs,
+ FullyShardedDataParallelPlugin,
+ GradientAccumulationPlugin,
+ GradScalerKwargs,
+ InitProcessGroupKwargs,
+ KwargsHandler,
+ LoggerType,
+ MegatronLMPlugin,
+ PrecisionType,
+ ProjectConfiguration,
+ RNGType,
+ TorchDynamoPlugin,
+ check_os_kernel,
+ clean_state_dict_for_safetensors,
+ compare_versions,
+ convert_model,
+ convert_outputs_to_fp32,
+ extract_model_from_parallel,
+ gather,
+ gather_object,
+ get_mixed_precision_context_manager,
+ get_pretty_name,
+ has_transformer_engine_layers,
+ is_bf16_available,
+ is_deepspeed_available,
+ is_fp8_available,
+ is_ipex_available,
+ is_megatron_lm_available,
+ is_mlu_available,
+ is_msamp_available,
+ is_npu_available,
+ is_torch_version,
+ is_torch_xla_available,
+ is_xpu_available,
+ load_fsdp_model,
+ load_fsdp_optimizer,
+ pad_across_processes,
+ parse_choice_from_env,
+ recursively_apply,
+ reduce,
+ release_memory,
+ save,
+ save_fsdp_model,
+ save_fsdp_optimizer,
+ shard_checkpoint,
+ wait_for_everyone,
+)
+from .utils.constants import FSDP_PYTORCH_VERSION
+from .utils.modeling import get_state_dict_offloaded_model
+from .utils.other import is_compiled_module
+
+
+if is_deepspeed_available():
+ from .utils import (
+ DeepSpeedEngineWrapper,
+ DeepSpeedOptimizerWrapper,
+ DeepSpeedSchedulerWrapper,
+ DummyOptim,
+ DummyScheduler,
+ )
+
+if is_fp8_available():
+ import transformer_engine.common.recipe as te_recipe
+ from transformer_engine.pytorch import fp8_autocast
+
+
+if is_megatron_lm_available():
+ from .utils import (
+ MegatronEngine,
+ MegatronLMDummyDataLoader,
+ MegatronLMDummyScheduler,
+ MegatronLMOptimizerWrapper,
+ MegatronLMSchedulerWrapper,
+ megatron_lm_initialize,
+ megatron_lm_prepare_data_loader,
+ megatron_lm_prepare_model,
+ megatron_lm_prepare_optimizer,
+ megatron_lm_prepare_scheduler,
+ )
+
+from torch.distributed.algorithms.join import Join
+
+
+if is_torch_xla_available():
+ import torch_xla.amp as xamp
+ import torch_xla.core.xla_model as xm
+ import torch_xla.distributed.xla_multiprocessing as xmp
+
+
+if is_npu_available(check_device=False):
+ import torch_npu # noqa: F401
+
+
+try:
+ from torch.optim.lr_scheduler import LRScheduler
+except ImportError:
+ from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
+
+logger = get_logger(__name__)
+
+# Sentinel values for defaults
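+# Each sentinel is a distinct object so `Accelerator.__init__` can tell whether the caller explicitly
+# passed one of the deprecated dataloader arguments (and should see a deprecation warning) or simply
+# left the default.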
+_split_batches = object()
+_dispatch_batches = object()
+_even_batches = object()
+_use_seedable_sampler = object()
+
+
+class Accelerator:
+ """
+ Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training.
+
+ Args:
+ device_placement (`bool`, *optional*, defaults to `True`):
+ Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
+ etc...).
+ mixed_precision (`str`, *optional*):
+ Whether or not to use mixed precision training. Choose from 'no', 'fp16', 'bf16', or 'fp8'. Will default to the
+ value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the
+ accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8'
+ requires the installation of transformer-engine.
+ gradient_accumulation_steps (`int`, *optional*, defaults to 1):
+ The number of steps over which gradients should be accumulated before the optimizer steps. A number > 1 should be combined with
+ `Accelerator.accumulate`. If not passed, will default to the value in the environment variable
+ `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a `GradientAccumulationPlugin`.
+ cpu (`bool`, *optional*):
+ Whether or not to force the script to execute on CPU. Will ignore any available GPU if set to `True` and force
+ the execution on one process only.
+ dataloader_config (`DataLoaderConfiguration`, *optional*):
+ A configuration for how the dataloaders should be handled in distributed scenarios.
+ deepspeed_plugin ([`~utils.DeepSpeedPlugin`], *optional*):
+ Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured
+ directly using *accelerate config*
+ fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*):
+ Tweak your FSDP related args using this argument. This argument is optional and can be configured directly
+ using *accelerate config*
+ megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*):
+ Tweak your MegatronLM related args using this argument. This argument is optional and can be configured
+ directly using *accelerate config*
+ rng_types (list of `str` or [`~utils.RNGType`]):
+ The list of random number generators to synchronize at the beginning of each iteration in your prepared
+ dataloaders. Should be one or several of:
+
+ - `"torch"`: the base torch random number generator
+ - `"cuda"`: the CUDA random number generator (GPU only)
+ - `"xla"`: the XLA random number generator (TPU only)
+ - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+ Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6.
+ log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
+ A list of loggers to be setup for experiment tracking. Should be one or several of:
+
+ - `"all"`
+ - `"tensorboard"`
+ - `"wandb"`
+ - `"comet_ml"`
+ If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
+ project_config ([`~utils.ProjectConfiguration`], *optional*):
+ A configuration for how saving the state can be handled.
+ project_dir (`str`, `os.PathLike`, *optional*):
+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
+ checkpoints.
+ step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
+ Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
+ done under certain circumstances (at the end of each epoch, for instance).
+ kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*):
+ A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training or mixed
+ precision are created. See [kwargs](kwargs) for more information.
+ dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`):
+ Set to one of the possible dynamo backends to optimize your training with torch dynamo.
+ gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*):
+ A configuration for how gradient accumulation should be handled, if more tweaking than just the
+ `gradient_accumulation_steps` is needed.
+
+ **Available attributes:**
+
+ - **device** (`torch.device`) -- The device to use.
+ - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.
+ - **local_process_index** (`int`) -- The process index on the current machine.
+ - **mixed_precision** (`str`) -- The configured mixed precision mode.
+ - **num_processes** (`int`) -- The total number of processes used for training.
+ - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of
+ gradient overflow in mixed precision), in which
+ case the learning rate should not be changed.
+ - **process_index** (`int`) -- The overall index of the current process among all processes.
+ - **state** ([`~state.AcceleratorState`]) -- The distributed setup state.
+ - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes.
+ - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training.
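+
+ Example (a minimal training-loop sketch; it assumes `model`, `optimizer`, `dataloader`, and `loss_func` are
+ already defined):
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
+
+ >>> for inputs, targets in dataloader:
+ ...     optimizer.zero_grad()
+ ...     outputs = model(inputs)
+ ...     loss = loss_func(outputs, targets)
+ ...     accelerator.backward(loss)
+ ...     optimizer.step()
+ ```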
+ """
+
+ def __init__(
+ self,
+ device_placement: bool = True,
+ split_batches: bool = _split_batches,
+ mixed_precision: PrecisionType | str | None = None,
+ gradient_accumulation_steps: int = 1,
+ cpu: bool = False,
+ dataloader_config: DataLoaderConfiguration | None = None,
+ deepspeed_plugin: DeepSpeedPlugin | None = None,
+ fsdp_plugin: FullyShardedDataParallelPlugin | None = None,
+ megatron_lm_plugin: MegatronLMPlugin | None = None,
+ rng_types: list[str | RNGType] | None = None,
+ log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None = None,
+ project_dir: str | os.PathLike | None = None,
+ project_config: ProjectConfiguration | None = None,
+ gradient_accumulation_plugin: GradientAccumulationPlugin | None = None,
+ dispatch_batches: bool | None = _dispatch_batches,
+ even_batches: bool = _even_batches,
+ use_seedable_sampler: bool = _use_seedable_sampler,
+ step_scheduler_with_optimizer: bool = True,
+ kwargs_handlers: list[KwargsHandler] | None = None,
+ dynamo_backend: DynamoBackend | str | None = None,
+ ):
+ self.trackers = []
+ if project_config is not None:
+ self.project_configuration = project_config
+ else:
+ self.project_configuration = ProjectConfiguration(project_dir=project_dir)
+ if project_dir is not None and self.project_dir is None:
+ self.project_configuration.set_directories(project_dir)
+ if mixed_precision is not None:
+ mixed_precision = str(mixed_precision)
+ if mixed_precision not in PrecisionType:
+ raise ValueError(
+ f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}"
+ )
+
+ dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend)
+
+ if deepspeed_plugin is None: # init from env variables
+ deepspeed_plugin = (
+ DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None
+ )
+ else:
+ assert isinstance(
+ deepspeed_plugin, DeepSpeedPlugin
+ ), "`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object."
+ os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided
+ if deepspeed_plugin:
+ if not is_deepspeed_available():
+ raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
+ if is_mlu_available():
+ if compare_versions("deepspeed-mlu", "<", "0.10.1"):
+ raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.")
+ elif compare_versions("deepspeed", "<", "0.9.3"):
+ raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
+
+ mixed_precision = (
+ os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision
+ )
+ deepspeed_plugin.set_mixed_precision(mixed_precision)
+ deepspeed_plugin.set_deepspeed_weakref()
+
+ if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance(
+ fsdp_plugin, FullyShardedDataParallelPlugin
+ ):
+ if is_torch_version("<", FSDP_PYTORCH_VERSION):
+ raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}")
+
+ if fsdp_plugin is None: # init from env variables
+ fsdp_plugin = (
+ FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None
+ )
+ else:
+ if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):
+ raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.")
+ os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided
+
+ if megatron_lm_plugin is None: # init from env variables
+ megatron_lm_plugin = (
+ MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None
+ )
+ else:
+ if not isinstance(megatron_lm_plugin, MegatronLMPlugin):
+ raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.")
+ os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided
+
+ if megatron_lm_plugin:
+ if not is_megatron_lm_available():
+ raise ImportError("Megatron is not installed. please build it from source.")
+
+ # Kwargs handlers
+ self.ddp_handler = None
+ self.scaler_handler = None
+ self.init_handler = None
+ self.fp8_recipe_handler = None
+ self.autocast_handler = None
+ if kwargs_handlers is not None:
+ for handler in kwargs_handlers:
+ assert isinstance(
+ handler, KwargsHandler
+ ), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`."
+ if isinstance(handler, DistributedDataParallelKwargs):
+ if self.ddp_handler is not None:
+ raise ValueError("You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.")
+ else:
+ self.ddp_handler = handler
+ elif isinstance(handler, GradScalerKwargs):
+ if self.scaler_handler is not None:
+ raise ValueError("You can only pass one `GradScalerKwargs` in `kwargs_handler`.")
+ else:
+ self.scaler_handler = handler
+ elif isinstance(handler, InitProcessGroupKwargs):
+ if self.init_handler is not None:
+ raise ValueError("You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.")
+ else:
+ self.init_handler = handler
+ elif isinstance(handler, FP8RecipeKwargs):
+ if self.fp8_recipe_handler is not None:
+ raise ValueError("You can only pass one `FP8RecipeKwargs` in `kwargs_handler`.")
+ else:
+ self.fp8_recipe_handler = handler
+ elif isinstance(handler, AutocastKwargs):
+ if self.autocast_handler is not None:
+ raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.")
+ else:
+ self.autocast_handler = handler
+
+ kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
+ self.state = AcceleratorState(
+ mixed_precision=mixed_precision,
+ cpu=cpu,
+ dynamo_plugin=dynamo_plugin,
+ deepspeed_plugin=deepspeed_plugin,
+ fsdp_plugin=fsdp_plugin,
+ megatron_lm_plugin=megatron_lm_plugin,
+ _from_accelerator=True,
+ **kwargs,
+ )
+
+ if self.fp8_recipe_handler is None and self.state.mixed_precision == "fp8":
+ self.fp8_recipe_handler = FP8RecipeKwargs(backend="MSAMP" if is_msamp_available() else "TE")
+
+ trackers = filter_trackers(log_with, self.logging_dir)
+ if len(trackers) < 1 and log_with is not None:
+ warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
+ self.log_with = trackers
+
+ if (
+ (mixed_precision != "bf16")
+ and getattr(self.state, "downcast_bfloat", False)
+ and (self.state.distributed_type != DistributedType.XLA)
+ ):
+ raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
+
+ if gradient_accumulation_plugin is not None:
+ if gradient_accumulation_steps != 1:
+ raise ValueError(
+ "You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object."
+ )
+ else:
+ gradient_accumulation_steps = int(
+ parse_choice_from_env("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", gradient_accumulation_steps)
+ )
+ gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps)
+ self.gradient_state = GradientState(
+ gradient_accumulation_plugin=gradient_accumulation_plugin,
+ )
+
+ self.device_placement = device_placement
+ if dataloader_config is None:
+ dataloader_config = DataLoaderConfiguration()
+ self.dataloader_config = dataloader_config
+ # Deal with deprecated args
+ # TODO: Remove in v1.0.0
+ deprecated_dl_args = {}
+ if dispatch_batches is not _dispatch_batches:
+ deprecated_dl_args["dispatch_batches"] = dispatch_batches
+ self.dataloader_config.dispatch_batches = dispatch_batches
+ if split_batches is not _split_batches:
+ deprecated_dl_args["split_batches"] = split_batches
+ self.dataloader_config.split_batches = split_batches
+ if even_batches is not _even_batches:
+ deprecated_dl_args["even_batches"] = even_batches
+ self.dataloader_config.even_batches = even_batches
+ if use_seedable_sampler is not _use_seedable_sampler:
+ deprecated_dl_args["use_seedable_sampler"] = use_seedable_sampler
+ self.dataloader_config.use_seedable_sampler = use_seedable_sampler
+ if len(deprecated_dl_args) > 0:
+ values = ", ".join([f"{k}={v}" for k, v in deprecated_dl_args.items()])
+ warnings.warn(
+ f"Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: {deprecated_dl_args.keys()}. "
+ "Please pass an `accelerate.DataLoaderConfiguration` instead: \n"
+ f"dataloader_config = DataLoaderConfiguration({values})",
+ FutureWarning,
+ )
+ self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
+
+ # Mixed precision attributes
+ self.scaler = None
+ self.native_amp = False
+ if (
+ self.state.mixed_precision == "fp16"
+ and self.device.type != "cpu"
+ and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
+ ):
+ self.native_amp = True
+ if self.device.type not in ("xpu", "cuda", "mps", "npu", "xla", "mlu") or is_torch_xla_available(
+ check_is_tpu=True
+ ):
+ raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
+ kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
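+ # FSDP shards gradients across ranks, so fp16 uses the sharded gradient scaler here; the other
+ # backends below fall back to their own `GradScaler` implementations.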
+ if self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
+
+ self.scaler = ShardedGradScaler(**kwargs)
+ elif is_torch_xla_available(check_is_gpu=True):
+ self.scaler = xamp.GradScaler(**kwargs)
+ elif is_mlu_available():
+ self.scaler = torch.mlu.amp.GradScaler(**kwargs)
+ elif is_npu_available():
+ self.scaler = torch.npu.amp.GradScaler(**kwargs)
+ else:
+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)
+
+ elif self.state.mixed_precision == "bf16" and self.distributed_type not in (
+ DistributedType.DEEPSPEED,
+ DistributedType.MEGATRON_LM,
+ ):
+ if self.device.type in ["cpu", "xpu"]:
+ self.native_amp = True
+ else:
+ self.native_amp = is_bf16_available(True)
+ if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available():
+ raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
+
+ # Start of internal step tracking
+ self.step = 0
+
+ # Internal references to the training objects
+ self._optimizers = []
+ self._models = []
+ self._schedulers = []
+ self._dataloaders = []
+ self._custom_objects = []
+
+ # Hooks
+ self._load_model_state_pre_hook = OrderedDict()
+ self._save_model_state_pre_hook = OrderedDict()
+
+ # RNG Types
+ self.rng_types = rng_types
+ if self.rng_types is None:
+ self.rng_types = ["generator"]
+
+ # Set a flag tensor for early stopping and other breakpoints
+ self.flag_tensor = None
+
+ check_os_kernel()
+
+ @property
+ def use_distributed(self):
+ """
+ Whether the Accelerator is configured for distributed training
+ """
+ return self.state.use_distributed
+
+ @property
+ def distributed_type(self):
+ return self.state.distributed_type
+
+ @property
+ def num_processes(self):
+ return self.state.num_processes
+
+ @property
+ def process_index(self):
+ return self.state.process_index
+
+ @property
+ def local_process_index(self):
+ return self.state.local_process_index
+
+ @property
+ def device(self):
+ return self.state.device
+
+ @property
+ def split_batches(self):
+ return self.dataloader_config.split_batches
+
+ @property
+ def dispatch_batches(self):
+ return self.dataloader_config.dispatch_batches
+
+ @property
+ def even_batches(self):
+ return self.dataloader_config.even_batches
+
+ @even_batches.setter
+ def even_batches(self, value: bool):
+ self.dataloader_config.even_batches = value
+
+ @property
+ def use_seedable_sampler(self):
+ return self.dataloader_config.use_seedable_sampler
+
+ @property
+ def project_dir(self):
+ return self.project_configuration.project_dir
+
+ @property
+ def logging_dir(self):
+ return self.project_configuration.logging_dir
+
+ @property
+ def save_iteration(self):
+ return self.project_configuration.iteration
+
+ @property
+ def is_main_process(self):
+ """True for one process only."""
+ return self.state.is_main_process
+
+ @property
+ def is_local_main_process(self):
+ """True for one process per server."""
+ return self.state.is_local_main_process
+
+ @property
+ def use_fp16(self):
+ warnings.warn(
+ "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
+ "`Accelerator.mixed_precision == 'fp16'` instead.",
+ FutureWarning,
+ )
+ return self.mixed_precision != "no"
+
+ @property
+ def is_last_process(self):
+ return self.process_index == self.num_processes - 1
+
+ @property
+ def mixed_precision(self):
+ return self.state.mixed_precision
+
+ @contextmanager
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
+ """
+ Splits `inputs` between `self.num_processes` quickly, and the result can then be used on that process. Useful when doing
+ distributed inference, such as with different prompts.
+
+ Note that when using a `dict`, all keys need to have the same number of elements.
+
+ Args:
+ inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
+ The input to split between processes.
+ apply_padding (`bool`, *optional*, defaults to `False`):
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
+ number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs
+ or passing in fewer inputs than there are processes. If so, just remember to drop the padded elements
+ afterwards.
+
+ Example:
+
+ ```python
+ # Assume there are two processes
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+ with accelerator.split_between_processes(["A", "B", "C"]) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C"]
+
+ with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C", "C"]
+ ```
+ """
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
+ yield inputs
+
+ def on_main_process(self, function: Callable[..., Any] = None):
+ """
+ A decorator that will run the decorated function on the main process only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+
+
+ >>> @accelerator.on_main_process
+ ... def print_something():
+ ... print("This will be printed by process 0 only.")
+
+
+ >>> print_something()
+ "This will be printed by process 0 only"
+ ```
+ """
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_main_process(function)(*args, **kwargs)
+
+ return _inner
+
+ def on_local_main_process(self, function: Callable[..., Any] = None):
+ """
+ A decorator that will run the decorated function on the local main process only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+ ```python
+ # Assume we have 2 servers with 4 processes each.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_local_main_process
+ def print_something():
+ print("This will be printed by process 0 only on each server.")
+
+
+ print_something()
+ # On server 1:
+ "This will be printed by process 0 only"
+ # On server 2:
+ "This will be printed by process 0 only"
+ ```
+ """
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_local_main_process(function)(*args, **kwargs)
+
+ return _inner
+
+ def on_last_process(self, function: Callable[..., Any]):
+ """
+ A decorator that will run the decorated function on the last process only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+ ```python
+ # Assume we have 4 processes.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_last_process
+ def print_something():
+ print(f"Printed on process {accelerator.process_index}")
+
+
+ print_something()
+ "Printed on process 3"
+ ```
+ """
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_last_process(function)(*args, **kwargs)
+
+ return _inner
+
+ def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
+ """
+ A decorator that will run the decorated function on a given process index only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`, `optional`):
+ The function to decorate.
+ process_index (`int`, `optional`):
+ The index of the process on which to run the function.
+
+ Example:
+ ```python
+ # Assume we have 4 processes.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_process(process_index=2)
+ def print_something():
+ print(f"Printed on process {accelerator.process_index}")
+
+
+ print_something()
+ "Printed on process 2"
+ ```
+ """
+ # Initial construction of the decorator.
+ if (self is not None) and (process_index is not None) and (function is None):
+ return partial(self.on_process, process_index=process_index)
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_process(function, process_index)(*args, **kwargs)
+
+ return _inner
+
+ def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
+ """
+ A decorator that will run the decorated function on a given local process index only. Can also be called using
+ the `PartialState` class.
+
+ Args:
+ function (`Callable`, *optional*):
+ The function to decorate.
+ local_process_index (`int`, *optional*):
+ The index of the local process on which to run the function.
+
+ Example:
+ ```python
+ # Assume we have 2 servers with 4 processes each.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_local_process(local_process_index=2)
+ def print_something():
+ print(f"Printed on process {accelerator.local_process_index}")
+
+
+ print_something()
+ # On server 1:
+ "Printed on process 2"
+ # On server 2:
+ "Printed on process 2"
+ ```
+ """
+ # Initial construction of the decorator.
+ if (self is not None) and (local_process_index is not None) and (function is None):
+ return partial(self.on_local_process, local_process_index=local_process_index)
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
+
+ return _inner
+
+ @contextmanager
+ def main_process_first(self):
+ """
+ Lets the main process go first inside a with block.
+
+ The other processes will enter the with block after the main process exits.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> with accelerator.main_process_first():
+ ... # This will be printed first by process 0 then in a seemingly
+ ... # random order by the other processes.
+ ... print(f"This will be printed by process {accelerator.process_index}")
+ ```
+ """
+ with self.state.main_process_first():
+ yield
+
+ @contextmanager
+ def local_main_process_first(self):
+ """
+ Lets the local main process go first inside a with block.
+
+ The other processes will enter the with block after the local main process exits.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> with accelerator.local_main_process_first():
+ ... # This will be printed first by local process 0 then in a seemingly
+ ... # random order by the other processes.
+ ... print(f"This will be printed by process {accelerator.local_process_index}")
+ ```
+ """
+ with self.state.local_main_process_first():
+ yield
+
+ @contextmanager
+ def no_sync(self, model):
+ """
+ A context manager to disable gradient synchronizations across DDP processes by calling
+ `torch.nn.parallel.DistributedDataParallel.no_sync`.
+
+ If `model` is not in DDP, this context manager does nothing.
+
+ Args:
+ model (`torch.nn.Module`):
+ PyTorch Module that was prepared with `Accelerator.prepare`
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
+ >>> input_a = next(iter(dataloader))
+ >>> input_b = next(iter(dataloader))
+
+ >>> with accelerator.no_sync(model):
+ ... outputs = model(input_a)
+ ... loss = loss_func(outputs)
+ ... accelerator.backward(loss)
+ ... # No synchronization across processes, only accumulate gradients
+ >>> outputs = model(input_b)
+ >>> loss = loss_func(outputs)
+ >>> accelerator.backward(loss)
+ >>> # Synchronization across all processes
+ >>> optimizer.step()
+ >>> optimizer.zero_grad()
+ ```
+ """
+ context = contextlib.nullcontext
+ if self.use_distributed:
+ context = getattr(model, "no_sync", context)
+
+ with context():
+ yield
+
+ @staticmethod
+ @contextmanager
+ def trigger_sync_in_backward(model):
+ """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under
+ `Accelerator.no_sync` (only applicable in multi-GPU scenarios).
+
+ If the script is not launched in distributed mode, this context manager does nothing.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model for which to trigger the gradient synchronization.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
+
+ >>> with accelerator.no_sync(model):
+ ... loss_a = loss_func(model(input_a)) # first forward pass
+ ... loss_b = loss_func(model(input_b)) # second forward pass
+ >>> accelerator.backward(loss_a) # No synchronization across processes, only accumulate gradients
+ >>> with accelerator.trigger_sync_in_backward(model):
+ ... accelerator.backward(loss_b) # Synchronization across all processes
+ >>> optimizer.step()
+ >>> optimizer.zero_grad()
+ ```
+ """
+ if not isinstance(model, torch.nn.parallel.DistributedDataParallel):
+ yield
+ return
+
+ old_require_backward_grad_sync = model.require_backward_grad_sync
+ old_require_forward_param_sync = model.require_forward_param_sync
+
+ # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features.
+ # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466
+ model.require_backward_grad_sync = True
+ model.require_forward_param_sync = True
+ # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/csrc/distributed/c10d/reducer.cpp#L1371-L1402
+ model.reducer.prepare_for_backward([])
+ try:
+ yield
+ finally:
+ model.require_backward_grad_sync = old_require_backward_grad_sync
+ model.require_forward_param_sync = old_require_forward_param_sync
+
+ def _do_sync(self, force: bool = False):
+ "Sets the right `sync_gradients` context and either resets or increases `self.step`"
+ if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
+ self.step = 0
+ self.gradient_state._set_sync_gradients(True)
+ else:
+ self.step += 1
+ self.gradient_state._set_sync_gradients(force or ((self.step % self.gradient_state.num_steps) == 0))
+
+ @property
+ def sync_gradients(self):
+ return self.gradient_state.sync_gradients
+
+ @sync_gradients.setter
+ def sync_gradients(self, sync_gradients):
+ self.gradient_state.sync_gradients = sync_gradients
+
+ @property
+ def gradient_accumulation_steps(self):
+ return self.gradient_state.num_steps
+
+ @gradient_accumulation_steps.setter
+ def gradient_accumulation_steps(self, gradient_accumulation_steps):
+ self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps})
+
+ @contextmanager
+ def accumulate(self, *models):
+ """
+ A context manager that will lightly wrap around and perform gradient accumulation automatically
+
+ Args:
+ *models (list of `torch.nn.Module`):
+ PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will
+ skip gradient syncing during backward pass in distributed training
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+ >>> for input, output in dataloader:
+ ... with accelerator.accumulate(model):
+ ... outputs = model(input)
+ ... loss = loss_func(outputs)
+ ... accelerator.backward(loss)
+ ... optimizer.step()
+ ... scheduler.step()
+ ... optimizer.zero_grad()
+ ```
+ """
+ # sync_each_batch=True will guarantee below that self.sync_gradients=True, therefore
+ # resulting in the nullcontext always being selected.
+ self._do_sync(force=self.gradient_state.plugin_kwargs.get("sync_each_batch", False))
+ with contextlib.ExitStack() as cm_stack:
+ for m in models:
+ cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
+ yield
+
+ @contextmanager
+ def join_uneven_inputs(self, joinables, even_batches=None):
+ """
+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper
+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the
+ length of the dataset.
+
+ Args:
+ joinables (`list[torch.distributed.algorithms.Joinable]`):
+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.
+ even_batches (`bool`, *optional*):
+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,
+ the default `Accelerator` value will be used.
+
+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
+ configuration, this method will have no effect.
+
+ Overriding `even_batches` will not affect iterable-style data loaders.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(even_batches=True)
+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
+
+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
+ ... for input, output in dataloader:
+ ... outputs = model(input)
+ ... loss = loss_func(outputs)
+ ... loss.backward()
+ ... optimizer.step()
+ ... optimizer.zero_grad()
+ ```
+ """
+ if self.distributed_type in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_XPU,
+ ):
+ dl_even_batches_values = []
+
+ if even_batches is not None:
+ iterable_dl_seen = False
+ # override value in batch sampler for map-style datasets
+ for dl_idx, dl in enumerate(self._dataloaders):
+ if isinstance(dl, DataLoaderDispatcher):
+ iterable_dl_seen = True
+ continue
+ dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))
+ dl.batch_sampler.even_batches = even_batches
+
+ if iterable_dl_seen:
+ warnings.warn(
+ "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
+ )
+ else:
+ even_batches = self.even_batches
+
+ enable_join = not even_batches
+ try:
+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):
+ yield
+ finally:
+ # reset any batch samplers that have been modified
+ for dl_idx, even_batches_value in dl_even_batches_values:
+ self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value
+ else:
+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs
+ if self.distributed_type != DistributedType.NO:
+ warnings.warn(
+ "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect."
+ )
+
+ with contextlib.nullcontext(joinables):
+ yield
+
+ def print(self, *args, **kwargs):
+ """
+ Drop-in replacement for `print()` that only prints once per server.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> accelerator.print("Hello world!")
+ ```
+ """
+ self.state.print(*args, **kwargs)
+
+ def _prepare_one(self, obj, first_pass=False, device_placement=None):
+ # First pass of preparation: DataLoader, model, optimizer
+ if first_pass:
+ if isinstance(obj, torch.utils.data.DataLoader):
+ return self.prepare_data_loader(obj, device_placement=device_placement)
+ elif isinstance(obj, torch.nn.Module):
+ return self.prepare_model(obj, device_placement=device_placement)
+ elif isinstance(obj, torch.optim.Optimizer):
+ optimizer = self.prepare_optimizer(obj, device_placement=device_placement)
+ return optimizer
+ # Second pass of preparation: LR scheduler (which needs the full list of optimizers)
+ elif isinstance(obj, LRScheduler):
+ scheduler = self.prepare_scheduler(obj)
+ return scheduler
+ # Return the unprocessed object if none of the previous criteria were met
+ return obj
+
+ def prepare(self, *args, device_placement=None):
+ """
+ Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same
+ order.
+
+ Args:
+ *args (list of objects):
+ Any of the following type of objects:
+
+ - `torch.utils.data.DataLoader`: PyTorch Dataloader
+ - `torch.nn.Module`: PyTorch Module
+ - `torch.optim.Optimizer`: PyTorch Optimizer
+ - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler
+
+ device_placement (`list[bool]`, *optional*):
+ Used to customize whether automatic device placement should be performed for each object passed. Needs
+ to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP.
+
+
+ You don't need to prepare a model if you only use it for inference without any kind of mixed precision.
+
+ Examples:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume a model, optimizer, data_loader and scheduler are defined
+ >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
+ ```
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume a model, optimizer, data_loader and scheduler are defined
+ >>> device_placement = [True, True, False, False]
+ >>> # Will automatically place the first two items passed in on the right device but not the last two.
+ >>> model, optimizer, data_loader, scheduler = accelerator.prepare(
+ ... model, optimizer, data_loader, scheduler, device_placement=device_placement
+ ... )
+ ```
+ """
+ if device_placement is None:
+ device_placement = [None for _ in args]
+ elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):
+ raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.")
+ elif len(device_placement) != len(args):
+ raise ValueError(
+ f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)."
+ )
+
+ for obj in args:
+ # TODO: Look at enabling native TP training directly with a proper config
+ if (
+ isinstance(obj, torch.nn.Module)
+ and self.verify_device_map(obj)
+ and self.distributed_type != DistributedType.NO
+ and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
+ ):
+ raise ValueError(
+ "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
+ " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
+ )
+
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ model_count = 0
+ for obj in args:
+ if isinstance(obj, torch.nn.Module):
+ model_count += 1
+ if model_count > 1:
+ raise AssertionError(
+ "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
+ )
+
+ # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
+ # have parameters disconnected from the model (so no training :-( ).
+ # If the model and optimizer have parameters on different devices we raise an error.
+ if self.distributed_type == DistributedType.XLA:
+ model_device, optimizer_device = self._get_devices()
+ if model_device is not None and optimizer_device is not None and model_device != optimizer_device:
+ raise ValueError(
+ "The model and the optimizer parameters are not on the same device, which probably means you "
+ "created an optimizer around your model **before** putting on the device. Make sure the line "
+ "model.to(device) is before the optimizer creation in your script or remove it entirely and use "
+ "the flag default value for `device_placement` in your `Accelerator` to let it handle that "
+ "part for you."
+ )
+
+ # If we're dealing with device placement, this deals with that by...
+ tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA
+ if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
+ # 1. grabbing old model parameters
+ old_named_params = self._get_named_parameters(*args)
+
+ if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
+ if self.device.type == "cpu" and self.state.use_ipex:
+ args = self._prepare_ipex(*args)
+ elif self.device.type == "xpu" and is_xpu_available():
+ args = self._prepare_ipex(*args)
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ result = self._prepare_deepspeed(*args)
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ result = self._prepare_megatron_lm(*args)
+ else:
+ if self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "MSAMP":
+ args = self._prepare_msamp(*args)
+ # MS-AMP will handle the device placement
+ device_placement = [False for _ in args]
+ result = tuple(
+ self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
+ )
+ result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
+
+ if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
+ # 2. grabbing new model parameters
+ new_named_params = self._get_named_parameters(*result)
+ # 3. building a map from the first to the second
+ mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
+ # 4. using that map to update the parameters of the optimizer
+ for obj in result:
+ if isinstance(obj, torch.optim.Optimizer):
+ obj._switch_parameters(mapping)
+
+ for item in result:
+ if any(
+ item in container
+ for container in (self._dataloaders, self._models, self._optimizers, self._schedulers)
+ ):
+ item._is_accelerate_prepared = True
+
+ return result if len(result) > 1 else result[0]
+
+ def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False):
+ """
+ Prepares a PyTorch model for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ model (`torch.nn.Module`):
+ A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without
+ any kind of mixed precision
+ device_placement (`bool`, *optional*):
+ Whether or not to place the model on the proper device. Will default to `self.device_placement`.
+ evaluation_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not to set the model for evaluation only, by just applying mixed precision and
+ `torch.compile` (if configured in the `Accelerator` object).
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume a model is defined
+ >>> model = accelerator.prepare_model(model)
+ ```
+ """
+ if device_placement is None:
+ device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
+ self._models.append(model)
+
+ # TODO: Look at enabling native TP training directly with a proper config
+ if (
+ self.verify_device_map(model)
+ and self.distributed_type != DistributedType.NO
+ and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
+ ):
+ raise ValueError(
+ "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
+ " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
+ )
+
+ if self.native_amp:
+ model._original_forward = model.forward
+ model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
+ autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler)
+ new_forward = autocast_context(model_forward_func)
+ if hasattr(model.forward, "__func__"):
+ model.forward = MethodType(new_forward, model)
+ model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
+ else:
+ model.forward = convert_outputs_to_fp32(new_forward)
+ elif self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE":
+ if not has_transformer_engine_layers(model):
+ with torch.no_grad():
+ convert_model(model)
+ model._converted_to_transformer_engine = True
+ model._original_forward = model.forward
+
+ kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {}
+ if "fp8_format" in kwargs:
+ kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
+ fp8_recipe = te_recipe.DelayedScaling(**kwargs)
+ model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
+
+ if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
+ model, "hf_device_map", False
+ ):
+ model_devices = set(model.hf_device_map.values())
+ if len(model_devices) > 1 and self.distributed_type != DistributedType.NO:
+ raise ValueError(
+ "You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode."
+ " In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism."
+ " Therefore you should not specify that you are under any distributed regime in your accelerate config."
+ )
+ current_device = list(model_devices)[0]
+ current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
+
+ if torch.device(current_device_index) != self.device:
+ # if on the first device (GPU 0) we don't care
+ if (self.device.index is not None) or (current_device_index != 0):
+ raise ValueError(
+ "You can't train a model that has been loaded in 8-bit precision on a different device than the one "
+ "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
+ )
+
+ if "cpu" in model_devices or "disk" in model_devices:
+ raise ValueError(
+ "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload."
+ )
+ elif device_placement and not self.verify_device_map(model):
+ model = model.to(self.device)
+ if not evaluation_mode:
+ if self.distributed_type in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_XPU,
+ ):
+ if any(p.requires_grad for p in model.parameters()):
+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
+ # TODO: Look at enabling native TP training directly with a proper config
+ if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true":
+ device_ids, output_device = [self.local_process_index], self.local_process_index
+ else:
+ device_ids, output_device = None, None
+
+ model = torch.nn.parallel.DistributedDataParallel(
+ model, device_ids=device_ids, output_device=output_device, **kwargs
+ )
+ elif self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
+
+ # Check if the model is already a FSDP model due to `Manual Wrapping` and if so,
+ # don't wrap it again
+ # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it
+ # is a FSDP model, don't wrap it again
+ is_type_fsdp = isinstance(model, FSDP) or (
+ is_compiled_module(model) and isinstance(model._orig_mod, FSDP)
+ )
+
+ if not is_type_fsdp:
+ self.state.fsdp_plugin.set_auto_wrap_policy(model)
+ fsdp_plugin = self.state.fsdp_plugin
+ kwargs = {
+ "sharding_strategy": fsdp_plugin.sharding_strategy,
+ "cpu_offload": fsdp_plugin.cpu_offload,
+ "auto_wrap_policy": fsdp_plugin.auto_wrap_policy,
+ "mixed_precision": fsdp_plugin.mixed_precision_policy,
+ "sync_module_states": fsdp_plugin.sync_module_states,
+ "backward_prefetch": fsdp_plugin.backward_prefetch,
+ "forward_prefetch": fsdp_plugin.forward_prefetch,
+ "use_orig_params": fsdp_plugin.use_orig_params,
+ "param_init_fn": fsdp_plugin.param_init_fn,
+ "ignored_modules": fsdp_plugin.ignored_modules,
+ "limit_all_gathers": fsdp_plugin.limit_all_gathers,
+ "device_id": self.device,
+ }
+ model = FSDP(model, **kwargs)
+ if fsdp_plugin.activation_checkpointing:
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
+ CheckpointImpl,
+ apply_activation_checkpointing,
+ checkpoint_wrapper,
+ )
+
+ apply_activation_checkpointing(
+ model,
+ checkpoint_wrapper_fn=functools.partial(
+ checkpoint_wrapper,
+ checkpoint_impl=CheckpointImpl.NO_REENTRANT,
+ ),
+ auto_wrap_policy=fsdp_plugin.auto_wrap_policy,
+ )
+ # if the previous and current models are same, delete the previous one
+ if len(self._models) > 1 and (self._models[-2] is self._models[-1]):
+ del self._models[-2]
+ self._models[-1] = model
+ elif self.distributed_type == DistributedType.MULTI_CPU:
+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
+ model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
+ elif self.distributed_type == DistributedType.XLA and self.state.fork_launched:
+ model = xmp.MpModelWrapper(model).to(self.device)
+ # torch.compile should be called last and only if the model isn't already compiled.
+ if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
+ if not is_torch_version(">=", "2.0"):
+ raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.")
+ model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
+ return model
+
+ def _prepare_deepspeed(self, *args):
+ import deepspeed
+
+ deepspeed_plugin = self.state.deepspeed_plugin
+
+ is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args)
+ result = [
+ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj
+ for obj in args
+ ]
+
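+ # When `train_micro_batch_size_per_gpu` is "auto" in the DeepSpeed config, infer it from the
+ # batch sizes of the dataloaders passed to `prepare()`; otherwise read it from the config.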
+ if deepspeed_plugin.is_auto("train_micro_batch_size_per_gpu"):
+ if is_dataloader_present:
+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
+ if any(bs is None for bs in batch_sizes):
+ raise ValueError(
+ "At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. "
+ "Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file "
+ "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
+ )
+ if self.split_batches:
+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]
+
+ batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)
+ if len(batch_sizes) > 1:
+ logger.info(
+ "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here "
+ f"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device})."
+ )
+ else:
+ raise ValueError(
+ "When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders "
+ "with `batch_size` attribute returning an integer value "
+ "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file "
+ "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
+ )
+ else:
+ batch_size_per_device = deepspeed_plugin.get_value("train_micro_batch_size_per_gpu")
+
+ # handle `gradient_accumulation_steps` when the value is `auto`
+ deepspeed_plugin.fill_match(
+ "gradient_accumulation_steps",
+ must_match=False,
+ gradient_accumulation_steps=self.gradient_accumulation_steps,
+ )
+
+ config_kwargs = {
+ "train_micro_batch_size_per_gpu": batch_size_per_device,
+ "train_batch_size": batch_size_per_device
+ * deepspeed_plugin.get_value("gradient_accumulation_steps")
+ * self.num_processes,
+ "gradient_clipping": 1.0,
+ "zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
+ }
+
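+ # Identify the model, optimizer and scheduler among the objects being prepared so they can be
+ # validated against the DeepSpeed config and handed to `deepspeed.initialize` together.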
+ model = None
+ optimizer = None
+ scheduler = None
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):
+ optimizer = obj
+ elif (isinstance(obj, (LRScheduler, DummyScheduler))) or (
+ type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
+ ):
+ scheduler = obj
+
+ if optimizer is not None:
+ if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)):
+ raise ValueError(
+ "You cannot specify an optimizer in the config file and in the code at the same time. "
+ "Please remove the optimizer from the config file or "
+ "create `accelerate.utils.DummyOptim` in the code."
+ )
+ elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)):
+ raise ValueError(
+ "You cannot create a `DummyOptim` without specifying an optimizer in the config file."
+ )
+
+ if isinstance(optimizer, (torch.optim.Optimizer)):
+ deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True
+
+ if scheduler is not None:
+ if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)):
+ raise ValueError(
+ "You cannot specify a scheduler in the config file and in the code at the same time. "
+ "Please remove the scheduler from the config file or "
+ "create `accelerate.utils.DummyScheduler` in the code."
+ )
+ elif (
+ "scheduler" not in deepspeed_plugin.deepspeed_config
+ and isinstance(scheduler, (DummyScheduler))
+ and scheduler.lr_scheduler_callable is None
+ ):
+ raise ValueError(
+ "Either specify a scheduler in the config file or "
+ "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
+ )
+
+ if optimizer is not None and scheduler is not None:
+ if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)):
+ raise ValueError(
+ "You can only specify `accelerate.utils.DummyScheduler` in the code when using "
+ "`accelerate.utils.DummyOptim`."
+ )
+
+ if model is not None:
+ # deal with config keys that use `auto` value and rely on model's hidden_size
+ hidden_size_based_keys = [
+ "zero_optimization.reduce_bucket_size",
+ "zero_optimization.stage3_prefetch_bucket_size",
+ "zero_optimization.stage3_param_persistence_threshold",
+ ]
+ hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)]
+ if len(hidden_size_auto_keys) > 0:
+ reasoning = (
+ "therefore it's not possible to automatically fill out the following `auto` entries "
+ + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing "
+ + "`auto` values for these keys with an integer value of your choice."
+ )
+ if not hasattr(model, "config"):
+ raise ValueError("Can't find `model.config` entry, " + reasoning)
+
+ if hasattr(model.config, "hidden_size"):
+ hidden_size = model.config.hidden_size
+ elif hasattr(model.config, "hidden_sizes"):
+ # if there are many hidden sizes pick the largest one
+ hidden_size = max(model.config.hidden_sizes)
+ else:
+ raise ValueError(
+ "Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, " + reasoning
+ )
+
+ config_kwargs.update(
+ {
+ "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
+ "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
+ "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
+ }
+ )
+
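+ # `DummyOptim`/`DummyScheduler` mean the real optimizer/scheduler live in the DeepSpeed config,
+ # so forward their hyperparameters (lr, weight decay, warmup) into the config instead.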
+ if isinstance(optimizer, (DummyOptim)):
+ config_kwargs.update(
+ {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay}
+ )
+ if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None:
+ max_lr = (
+ getattr(scheduler.optimizer, "lr", None)
+ if getattr(scheduler.optimizer, "defaults", None) is None
+ else scheduler.optimizer.defaults["lr"]
+ )
+ config_kwargs.update(
+ {
+ "scheduler.params.warmup_min_lr": 0,
+ "scheduler.params.warmup_max_lr": max_lr,
+ "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps,
+ }
+ )
+ if scheduler.total_num_steps is not None:
+ config_kwargs["scheduler.params.total_num_steps"] = (
+ math.ceil(scheduler.total_num_steps / self.num_processes)
+ if not self.split_batches
+ else scheduler.total_num_steps
+ )
+ deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs)
+ self.deepspeed_config = deepspeed_plugin.deepspeed_config
+ kwargs = dict(model=model, config_params=self.deepspeed_config)
+ if optimizer is not None:
+ if isinstance(optimizer, (DummyOptim)):
+ kwargs["model_parameters"] = optimizer.params
+ if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is not None:
+ kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable
+ else:
+ if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get(
+ "device", "none"
+ ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True):
+ from deepspeed.ops.adam import DeepSpeedCPUAdam
+
+ defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]}
+ optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
+ kwargs["optimizer"] = optimizer
+ if scheduler is not None:
+ if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
+ kwargs["lr_scheduler"] = scheduler
+
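+ # Hand everything to DeepSpeed; `deepspeed.initialize` returns the engine together with the
+ # (possibly new) optimizer and lr scheduler, which are re-wrapped with Accelerate wrappers below.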
+ engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)
+ if optimizer is not None:
+ optimizer = DeepSpeedOptimizerWrapper(optimizer)
+ if scheduler is not None:
+ if lr_scheduler is None:
+ scheduler = AcceleratedScheduler(
+ scheduler,
+ optimizer,
+ step_with_optimizer=self.step_scheduler_with_optimizer,
+ split_batches=self.split_batches,
+ )
+ else:
+ scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)
+
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = engine
+ elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):
+ result[i] = optimizer
+ elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or (
+ type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
+ ):
+ result[i] = scheduler
+ # keep a reference to the engine so `self.deepspeed_engine_wrapped.backward()` can be used later
+ self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine)
+ self._models.append(engine)
+ if optimizer is not None:
+ self._optimizers.append(optimizer)
+ if scheduler is not None:
+ self._schedulers.append(scheduler)
+ if len(self._models) > 1:
+ raise AssertionError(
+ "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
+ )
+ return tuple(result)
+
+ def _prepare_megatron_lm(self, *args):
+ megatron_lm_plugin = self.state.megatron_lm_plugin
+ if not megatron_lm_plugin.megatron_dataset_flag:
+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
+ if len(batch_sizes) == 0:
+ raise ValueError(
+ "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM."
+ )
+
+ micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes)
+ if len(batch_sizes) > 1:
+ logger.info(
+ "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here "
+ f"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size})."
+ )
+ else:
+ for obj in args:
+ if isinstance(obj, MegatronLMDummyDataLoader):
+ micro_batch_size = obj.dataset_args["micro_batch_size"]
+ break
+
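+ # The data-parallel degree is whatever remains of the world size after the tensor- and
+ # pipeline-parallel splits.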
+ dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)
+ megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)
+
+ model = None
+ optimizer = None
+ scheduler = None
+ is_dummy_scheduler = False
+ batch_data = None
+ for obj in args:
+ if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None:
+ batch_data = next(iter(obj))
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ elif isinstance(obj, (torch.optim.Optimizer)):
+ optimizer = obj
+ elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)):
+ scheduler = obj
+
+ if model is not None:
+ megatron_lm_plugin.set_network_size_args(model, batch_data)
+ if optimizer is not None:
+ megatron_lm_plugin.set_optimizer_type(optimizer)
+ if scheduler is not None:
+ is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler)
+ if not is_dummy_scheduler:
+ raise ValueError(
+ "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead."
+ )
+ megatron_lm_plugin.set_scheduler_args(scheduler)
+
+ # initialize megatron-lm
+ megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)
+ counter = 0
+ result = []
+ for obj in args:
+ if isinstance(obj, torch.utils.data.DataLoader):
+ result.append(megatron_lm_prepare_data_loader(self, obj))
+ counter += 1
+ elif isinstance(obj, MegatronLMDummyDataLoader):
+ if counter == 0:
+ obj.set_megatron_data_args()
+ dataloaders = megatron_lm_prepare_data_loader(self, obj)
+ result.append(dataloaders[counter])
+ counter += 1
+ else:
+ result.append(obj)
+
+ if model is not None:
+ model = megatron_lm_prepare_model(self)
+ if optimizer is not None:
+ optimizer = megatron_lm_prepare_optimizer(self, model)
+ if scheduler is not None:
+ scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler)
+
+ if model is not None:
+ model = MegatronEngine(self, model, optimizer, scheduler)
+ if optimizer is not None:
+ optimizer = MegatronLMOptimizerWrapper(optimizer)
+ if scheduler is not None:
+ scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer)
+
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], torch.optim.Optimizer):
+ result[i] = optimizer
+ elif isinstance(result[i], MegatronLMDummyScheduler):
+ result[i] = scheduler
+ if model is not None:
+ self._models.append(model)
+ if optimizer is not None:
+ self._optimizers.append(optimizer)
+ if scheduler is not None:
+ self._schedulers.append(scheduler)
+ if len(self._models) > 1:
+ raise AssertionError(
+ "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM"
+ )
+ return tuple(result)
+
+ def _prepare_ipex(self, *args):
+ if not is_ipex_available():
+ raise ImportError(
+ "IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer"
+ " to https://github.com/intel/intel-extension-for-pytorch."
+ )
+ else:
+ import intel_extension_for_pytorch as ipex
+
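+ # Collect the model and optimizer from the passed objects so they can be optimized together
+ # (via `torch.xpu.optimize` on XPU, otherwise via `ipex.optimize`).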
+ model = None
+ optimizer = None
+ result = [obj for obj in args]
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ model.train()
+ elif isinstance(obj, (torch.optim.Optimizer)):
+ optimizer = obj
+ if optimizer is not None and model is not None:
+ dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
+ if self.device.type == "xpu" and is_xpu_available():
+ model = model.to(self.device)
+ model, optimizer = torch.xpu.optimize(
+ model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1"
+ )
+ else:
+ model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1")
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], (torch.optim.Optimizer)):
+ result[i] = optimizer
+ return tuple(result)
+
+ def _prepare_msamp(self, *args):
+ if not is_msamp_available():
+ raise ImportError(
+ "MS-AMP was not found on your system. Please ensure that MS-AMP is available "
+ " or choose `'te'` as the backend for FP8 mixed precision training."
+ )
+ else:
+ import msamp
+
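+ # MS-AMP initialization expects exactly one model and one optimizer, so collect and count them
+ # before calling `msamp.initialize`.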
+ model, optimizer = None, None
+ num_models, num_optimizers = 0, 0
+ result = [obj for obj in args]
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ num_models += 1
+ elif isinstance(obj, (torch.optim.Optimizer)):
+ optimizer = obj
+ num_optimizers += 1
+ if optimizer is None or model is None:
+ raise ValueError(
+ "You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP."
+ )
+ elif num_models > 1 or num_optimizers > 1:
+ raise ValueError(
+ f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with MS-AMP."
+ )
+ else:
+ model, optimizer = msamp.initialize(model, optimizer, opt_level=self.fp8_recipe_handler.opt_level)
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], (torch.optim.Optimizer)):
+ result[i] = optimizer
+ return tuple(result)
+
+ def prepare_data_loader(
+ self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None
+ ):
+ """
+ Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ data_loader (`torch.utils.data.DataLoader`):
+ A vanilla PyTorch DataLoader to prepare
+ device_placement (`bool`, *optional*):
+ Whether or not to place the batches on the proper device in the prepared dataloader. Will default to
+ `self.device_placement`.
+ slice_fn_for_dispatch (`Callable`, *optional*):
+ If passed, this function will be used to slice tensors across `num_processes`. Will default to
+ [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will
+ be ignored otherwise.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> data_loader = torch.utils.data.DataLoader(...)
+ >>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True)
+ ```
+ """
+ # Ensure we can't double wrap a DataLoader due to `find_batch_size`
+ if getattr(data_loader, "_is_accelerate_prepared", False):
+ if data_loader not in self._dataloaders:
+ self._dataloaders.append(data_loader)
+ return data_loader
+ if device_placement is None:
+ device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False
+ prepared_data_loader = prepare_data_loader(
+ data_loader,
+ self.device,
+ num_processes=self.num_processes,
+ process_index=self.process_index,
+ split_batches=self.split_batches,
+ put_on_device=device_placement,
+ rng_types=self.rng_types.copy(),
+ dispatch_batches=self.dispatch_batches,
+ even_batches=self.even_batches,
+ slice_fn_for_dispatch=slice_fn_for_dispatch,
+ use_seedable_sampler=self.use_seedable_sampler,
+ )
+ self._dataloaders.append(prepared_data_loader)
+ return prepared_data_loader
+
+ def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):
+ """
+ Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ optimizer (`torch.optim.Optimizer`):
+ A vanilla PyTorch optimizer to prepare
+ device_placement (`bool`, *optional*):
+ Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> optimizer = torch.optim.Adam(...)
+ >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True)
+ ```
+ """
+ # Ensure we can't double wrap an optimizer due to `find_batch_size`
+ if getattr(optimizer, "_is_accelerate_prepared", False):
+ if optimizer not in self._optimizers:
+ self._optimizers.append(optimizer)
+ return optimizer
+ if device_placement is None:
+ device_placement = self.device_placement
+ optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)
+ self._optimizers.append(optimizer)
+ return optimizer
+
+ def prepare_scheduler(self, scheduler: LRScheduler):
+ """
+ Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ scheduler (`torch.optim.lr_scheduler.LRScheduler`):
+ A vanilla PyTorch scheduler to prepare
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> optimizer = torch.optim.Adam(...)
+ >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
+ >>> scheduler = accelerator.prepare_scheduler(scheduler)
+ ```
+ """
+ # Ensure we can't double wrap a scheduler due to `find_batch_size`
+ if getattr(scheduler, "_is_accelerate_prepared", False):
+ if scheduler not in self._schedulers:
+ self._schedulers.append(scheduler)
+ return scheduler
+ # We try to find the optimizer associated with `scheduler`, the default is the full list.
+ optimizer = self._optimizers
+ for opt in self._optimizers:
+ if getattr(scheduler, "optimizer", None) == opt.optimizer:
+ optimizer = opt
+ break
+ scheduler = AcceleratedScheduler(
+ scheduler,
+ optimizer,
+ step_with_optimizer=self.step_scheduler_with_optimizer,
+ split_batches=self.split_batches,
+ )
+ self._schedulers.append(scheduler)
+ return scheduler
+
+ def backward(self, loss, **kwargs):
+ """
+ Scales the gradients in accordance with the `GradientAccumulationPlugin` and calls the correct `backward()` based
+ on the configuration.
+
+ Should be used in lieu of `loss.backward()`.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+ >>> outputs = model(inputs)
+ >>> loss = loss_fn(outputs, labels)
+ >>> accelerator.backward(loss)
+ ```
+ """
+ if self.distributed_type != DistributedType.DEEPSPEED:
+ # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
+ loss = loss / self.gradient_accumulation_steps
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ self.deepspeed_engine_wrapped.backward(loss, **kwargs)
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ return
+ elif self.scaler is not None:
+ self.scaler.scale(loss).backward(**kwargs)
+ else:
+ loss.backward(**kwargs)
+
+ def set_trigger(self):
+ """
+ Sets the internal trigger tensor to 1 on the current process. A later check with [`Accelerator.check_trigger`]
+ should follow, which will check across all processes.
+
+ Note:
+ Does not require `wait_for_everyone()`
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume later in the training script
+ >>> # `should_do_breakpoint` is a custom function to monitor when to break,
+ >>> # e.g. when the loss is NaN
+ >>> if should_do_breakpoint(loss):
+ ... accelerator.set_trigger()
+ >>> # Assume later in the training script
+ >>> if accelerator.check_trigger():
+ ... break
+ ```
+ """
+ self.flag_tensor = torch.tensor(1, device=self.device)
+
+ def check_trigger(self):
+ """
+ Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and
+ reset the trigger tensor to 0.
+
+ Note:
+ Does not require `wait_for_everyone()`
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume later in the training script
+ >>> # `should_do_breakpoint` is a custom function to monitor when to break,
+ >>> # e.g. when the loss is NaN
+ >>> if should_do_breakpoint(loss):
+ ... accelerator.set_trigger()
+ >>> # Assume later in the training script
+ >>> if accelerator.check_trigger():
+ ... break
+ ```
+ """
+ # Now that we are outside `__init__`, we can initialize it if it is `None` on device
+ if self.flag_tensor is None:
+ self.flag_tensor = torch.tensor(0, device=self.device)
+ flag_tensor = self.reduce(self.flag_tensor)
+ if flag_tensor.item() >= 1:
+ self.flag_tensor = torch.tensor(0, device=self.device)
+ return True
+ return False
+
+ def unscale_gradients(self, optimizer=None):
+ """
+ Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.
+
+ Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`]
+
+ Args:
+ optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*):
+ The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers
+ that were passed to [`~Accelerator.prepare`].
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer = accelerator.prepare(model, optimizer)
+ >>> outputs = model(inputs)
+ >>> loss = loss_fn(outputs, labels)
+ >>> accelerator.backward(loss)
+ >>> accelerator.unscale_gradients(optimizer=optimizer)
+ ```
+ """
+ if self.native_amp and self.mixed_precision == "fp16":
+ if optimizer is None:
+ # TODO: this unscales all optimizers where we should only unscale the one where parameters are.
+ optimizer = self._optimizers
+ elif not isinstance(optimizer, (tuple, list)):
+ optimizer = [optimizer]
+ for opt in optimizer:
+ while isinstance(opt, AcceleratedOptimizer):
+ opt = opt.optimizer
+ self.scaler.unscale_(opt)
+
+ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
+ """
+ Should be used in place of `torch.nn.utils.clip_grad_norm_`.
+
+ Returns:
+ `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+ >>> for input, target in dataloader:
+ ... optimizer.zero_grad()
+ ... output = model(input)
+ ... loss = loss_func(output, target)
+ ... accelerator.backward(loss)
+ ... if accelerator.sync_gradients:
+ ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
+ ... optimizer.step()
+ ```
+ """
+ if self.distributed_type == DistributedType.FSDP:
+ self.unscale_gradients()
+ parameters = [p for p in parameters]
+ for model in self._models:
+ if parameters == [p for p in model.parameters()]:
+ return model.clip_grad_norm_(max_norm, norm_type)
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ # DeepSpeed handles gradient clipping itself (triggered through `accelerator.backward(loss)`), so no
+ # implementation is needed here. We cannot return the gradient norm because DeepSpeed clips internally.
+ return None
+ elif self.distributed_type == DistributedType.XLA:
+ # Reduce gradients first for XLA
+ for acc_opt in self._optimizers:
+ if not acc_opt.gradient_state.is_xla_gradients_synced:
+ opt = acc_opt
+ while isinstance(opt, AcceleratedOptimizer):
+ opt = opt.optimizer
+ gradients = xm._fetch_gradients(opt)
+ # Use xm.all_reduce to perform an in-place all-reduce; recursively all-reducing each tensor
+ # one by one via self.reduce would not be in-place.
+ xm.all_reduce("sum", gradients, scale=1.0 / self.num_processes)
+ # Set is_xla_gradients_synced to True to avoid all-reduce twice in the AcceleratedOptimizer step.
+ acc_opt.gradient_state.is_xla_gradients_synced = True
+ self.unscale_gradients()
+ return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
+
+ def clip_grad_value_(self, parameters, clip_value):
+ """
+ Should be used in place of `torch.nn.utils.clip_grad_value_`.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+ >>> for input, target in dataloader:
+ ... optimizer.zero_grad()
+ ... output = model(input)
+ ... loss = loss_func(output, target)
+ ... accelerator.backward(loss)
+ ... if accelerator.sync_gradients:
+ ... accelerator.clip_grad_value_(model.parameters(), clip_value)
+ ... optimizer.step()
+ ```
+ """
+ if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:
+ raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.")
+ self.unscale_gradients()
+ torch.nn.utils.clip_grad_value_(parameters, clip_value)
+
+ def gather(self, tensor):
+ """
+ Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to
+ regroup the predictions from all processes when doing evaluation.
+
+ Note:
+ This gather happens in all processes.
+
+ Args:
+ tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
+ The tensors to gather across all processes.
+
+ Returns:
+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the
+ first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors.
+
+ Example:
+
+ ```python
+ >>> # Assuming four processes
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> process_tensor = torch.tensor([accelerator.process_index])
+ >>> gathered_tensor = accelerator.gather(process_tensor)
+ >>> gathered_tensor
+ tensor([0, 1, 2, 3])
+ ```
+ """
+ return gather(tensor)
+
+ def gather_for_metrics(self, input_data):
+ """
+ Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
+ used for gathering the inputs and targets for metric calculation.
+
+ Args:
+ input_data (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
+ The tensors or objects for calculating metrics across all processes
+
+ Example:
+
+ ```python
+ >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5)
+ >>> dataloader = accelerator.prepare(dataloader)
+ >>> batch = next(iter(dataloader))
+ >>> gathered_items = accelerator.gather_for_metrics(batch)
+ >>> len(gathered_items)
+ 9
+ ```
+ """
+
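+ # Detect whether `input_data` consists purely of tensors; non-tensor objects are gathered
+ # with `gather_object` instead of `gather`.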
+ try:
+ recursively_apply(lambda x: x, input_data, error_on_other_type=True)
+ all_tensors = True
+ except TypeError:
+ all_tensors = False
+
+ if not all_tensors:
+ data = gather_object(input_data)
+ else:
+ data = self.gather(input_data)
+
+ try:
+ if self.gradient_state.end_of_dataloader:
+ # At the end of a dataloader, `gather_for_metrics` behaves like a plain `gather` unless the
+ # dataset has a remainder that needs truncating; if the remainder is unknown, log and return everything.
+ if self.gradient_state.remainder == -1:
+ logger.info(
+ "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself."
+ )
+ return data
+ elif self.gradient_state.remainder > 0:
+ # Last batch needs to be truncated on distributed systems as it contains additional samples
+ def _adjust_samples(tensor):
+ return tensor[: self.gradient_state.remainder]
+
+ return recursively_apply(_adjust_samples, data)
+ else: # remainder is 0
+ # no remainder even though at end of dataloader, so nothing to do.
+ return data
+ else:
+ # Not at the end of the dataloader, no need to adjust the tensors
+ return data
+ except Exception:
+ # Dataset had no length or raised an error
+ return data
+
+ def reduce(self, tensor, reduction="sum", scale=1.0):
+ """
+ Reduce the values in *tensor* across all processes based on *reduction*.
+
+ Note:
+ All processes get the reduced value.
+
+ Args:
+ tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
+ The tensors to reduce across all processes.
+ reduction (`str`, *optional*, defaults to "sum"):
+ A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
+ scale (`float`, *optional*, defaults to 1.0):
+ A default scaling value to be applied after the reduce, valid only on XLA.
+
+ Returns:
+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
+ The reduced tensor(s).
+
+ Example:
+
+ ```python
+ >>> # Assuming two processes
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index)
+ >>> process_tensor = process_tensor.to(accelerator.device)
+ >>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum")
+ >>> reduced_tensor
+ tensor([4, 6])
+ ```
+ """
+ return reduce(tensor, reduction, scale)
+
+ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
+ """
+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
+ they can safely be gathered.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to gather.
+ dim (`int`, *optional*, defaults to 0):
+ The dimension on which to pad.
+ pad_index (`int`, *optional*, defaults to 0):
+ The value with which to pad.
+ pad_first (`bool`, *optional*, defaults to `False`):
+ Whether to pad at the beginning or the end.
+
+ Returns:
+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
+ The padded tensor(s).
+
+ Example:
+
+ ```python
+ >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device)
+ >>> padded_tensor = accelerator.pad_across_processes(process_tensor)
+ >>> padded_tensor.shape
+ torch.Size([2])
+ ```
+ """
+ return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)
+
+ def unwrap_model(self, model, keep_fp32_wrapper: bool = True):
+ """
+ Unwraps the `model` from the additional layer possibly added by [`~Accelerator.prepare`]. Useful before saving
+ the model.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to unwrap.
+ keep_fp32_wrapper (`bool`, *optional*, defaults to `True`):
+ Whether to keep the mixed precision hook attached to the model (if one was added).
+
+ Returns:
+ `torch.nn.Module`: The unwrapped model.
+
+ Example:
+
+ ```python
+ >>> # Assuming two GPU processes
+ >>> from torch.nn.parallel import DistributedDataParallel
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model = accelerator.prepare(MyModel())
+ >>> print(model.__class__.__name__)
+ DistributedDataParallel
+
+ >>> model = accelerator.unwrap_model(model)
+ >>> print(model.__class__.__name__)
+ MyModel
+ ```
+ """
+ return extract_model_from_parallel(model, keep_fp32_wrapper)
+
+ def wait_for_everyone(self):
+ """
+ Will stop the execution of the current process until every other process has reached that point (so this does
+ nothing when the script is only run in one process). Useful to do before saving a model.
+
+ Example:
+
+ ```python
+ >>> # Assuming two GPU processes
+ >>> import time
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> if accelerator.is_main_process:
+ ... time.sleep(2)
+ ... else:
+ ... print("I'm waiting for the main process to finish its sleep...")
+ >>> accelerator.wait_for_everyone()
+ >>> # Should print on every process at the same time
+ >>> print("Everyone is here")
+ ```
+ """
+ wait_for_everyone()
+
+ @on_main_process
+ def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}):
+ """
+ Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations
+
+ Args:
+ project_name (`str`):
+ The name of the project. All trackers will save their data based on this
+ config (`dict`, *optional*):
+ Optional starting configuration to be logged.
+ init_kwargs (`dict`, *optional*):
+ A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be
+ formatted like so:
+ ```python
+ {"wandb": {"tags": ["tag_a", "tag_b"]}}
+ ```
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers(
+ ... project_name="my_project",
+ ... config={"learning_rate": 0.001, "batch_size": 32},
+ ... init_kwargs={"tensorboard": {"flush_secs": 60}},
+ ... )
+ ```
+ """
+ for tracker in self.log_with:
+ if issubclass(type(tracker), GeneralTracker):
+ # Custom trackers are already initialized
+ self.trackers.append(tracker)
+ else:
+ tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]
+ if tracker_init.requires_logging_directory:
+ # We can skip this check since it was done in `__init__`
+ self.trackers.append(
+ tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {}))
+ )
+ else:
+ self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {})))
+ if config is not None:
+ for tracker in self.trackers:
+ tracker.store_init_configuration(config)
+
+ def get_tracker(self, name: str, unwrap: bool = False):
+ """
+ Returns a `tracker` from `self.trackers` based on `name` on the main process only.
+
+ Args:
+ name (`str`):
+ The name of a tracker, corresponding to the `.name` property.
+ unwrap (`bool`):
+ Whether to return the internal tracking mechanism (`unwrap=True`) or the wrapped tracker itself
+ (`unwrap=False`, recommended).
+
+ Returns:
+ `GeneralTracker`: The tracker corresponding to `name` if it exists.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers("my_project")
+ >>> tensorboard_tracker = accelerator.get_tracker("tensorboard")
+ ```
+ """
+ if len(self.trackers) > 0:
+ for tracker in self.trackers:
+ if tracker.name == name:
+ return tracker.tracker if unwrap else tracker
+ raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.")
+ # Handle tracker only made on main process
+ return GeneralTracker(_blank=True)
+
+ @on_main_process
+ def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}):
+ """
+ Logs `values` to all stored trackers in `self.trackers` on the main process only.
+
+ Args:
+ values (`dict`):
+ Values should be a dictionary-like object containing only types `int`, `float`, or `str`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ log_kwargs (`dict`, *optional*):
+ A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted
+ like so:
+ ```python
+ {"wandb": {"tags": ["tag_a", "tag_b"]}}
+ ```
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers("my_project")
+ >>> accelerator.log({"loss": 0.5, "accuracy": 0.9})
+ ```
+ """
+ for tracker in self.trackers:
+ tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))
+
+ @on_main_process
+ def end_training(self):
+ """
+ Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be
+ called at the end of your script if using experiment tracking.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers("my_project")
+ >>> # Do training
+ >>> accelerator.end_training()
+ ```
+ """
+ for tracker in self.trackers:
+ tracker.finish()
+
+ def save(self, obj, f, safe_serialization=False):
+ """
+ Save the object passed to disk once per machine. Use in place of `torch.save`.
+
+ Args:
+ obj (`object`): The object to save.
+ f (`str` or `os.PathLike`): Where to save the content of `obj`.
+ safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
+
+ Note:
+ If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node,
+ rather than only once on the main node.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> arr = [0, 1, 2, 3]
+ >>> accelerator.save(arr, "array.pkl")
+ ```
+ """
+ save(
+ obj,
+ f,
+ save_on_each_node=self.project_configuration.save_on_each_node,
+ safe_serialization=safe_serialization,
+ )
+
+ def save_model(
+ self,
+ model: torch.nn.Module,
+ save_directory: Union[str, os.PathLike],
+ max_shard_size: Union[int, str] = "10GB",
+ safe_serialization: bool = True,
+ ):
+ """
+ Save a model so that it can be re-loaded using [`load_checkpoint_in_model`].
+
+ Arguments:
+ model (`torch.nn.Module`):
+ Model to be saved. The model can be wrapped or unwrapped.
+ save_directory (`str` or `os.PathLike`):
+ Directory to which to save. Will be created if it doesn't exist.
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
+ The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller
+ than this size. If expressed as a string, it needs to be digits followed by a unit (like `"5MB"`).
+
+ If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
+ which will be bigger than `max_shard_size`.
+
+ safe_serialization (`bool`, *optional*, defaults to `True`):
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model = ...
+ >>> accelerator.save_model(model, save_directory)
+ ```
+ """
+
+ if os.path.isfile(save_directory):
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
+ return
+
+ os.makedirs(save_directory, exist_ok=True)
+
+ # get the state_dict of the model
+ if any(
+ [
+ module._hf_hook.offload
+ for module in model.modules()
+ if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook)
+ ]
+ ):
+ state_dict = get_state_dict_offloaded_model(model)
+ else:
+ if any(param.device == torch.device("meta") for param in model.parameters()):
+ raise RuntimeError("You can't save the model since some parameters are on the meta device.")
+ state_dict = self.get_state_dict(model)
+
+ if safe_serialization:
+ state_dict = clean_state_dict_for_safetensors(state_dict)
+ weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
+
+ # Shard the model if it is too big.
+ shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
+
+ # Clean the folder from a previous save
+ for filename in os.listdir(save_directory):
+ full_filename = os.path.join(save_directory, filename)
+ # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
+ # in distributed settings to avoid race conditions.
+ weights_no_suffix = weights_name.replace(".bin", "")
+
+ # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
+ filename_no_suffix = filename.replace(".bin", "")
+ reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")
+
+ if (
+ filename.startswith(weights_no_suffix)
+ and os.path.isfile(full_filename)
+ and filename not in shards.keys()
+ and reg.fullmatch(filename_no_suffix) is not None
+ and PartialState().is_main_process
+ ):
+ os.remove(full_filename)
+
+ # Save the model
+ for shard_file, shard in shards.items():
+ self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization)
+
+ if index is None:
+ path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
+ logger.info(f"Model weights saved in {path_to_weights}")
+ else:
+ save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
+ save_index_file = os.path.join(save_directory, save_index_file)
+ # Save the index as well
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+ logger.info(
+ f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
+ f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
+ f"index located at {save_index_file}."
+ )
+
+ def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
+ """
+ Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`].
+
+ Args:
+ hook (`Callable`):
+ A function to be called in [`Accelerator.save_state`] before `save_checkpoint`.
+
+ The hook should have the following signature:
+
+ `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None`
+
+ The `models` argument is the list of models as saved in the accelerator state under `accelerator._models`, the
+ `weights` argument is the list of state dicts of those models, and the `input_dir` argument is the `output_dir`
+ argument passed to [`Accelerator.save_state`].
+
+ Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save
+ configurations in addition to model weights. Can also be used to overwrite model saving with a customized
+ method. In this case, make sure to remove the already saved weights from the weights list.
+
+ Returns:
+ `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
+ `handle.remove()`
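+
+ Example (a minimal sketch; the `log_models_before_save` hook below is hypothetical and only logs what is
+ about to be saved):
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> def log_models_before_save(models, weights, output_dir):
+ ...     print(f"Saving {len(models)} model(s) to {output_dir}")
+ >>> handle = accelerator.register_save_state_pre_hook(log_models_before_save)
+ >>> # The hook can later be removed with `handle.remove()`
+ ```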
+ """
+ handle = hooks.RemovableHandle(self._save_model_state_pre_hook)
+ self._save_model_state_pre_hook[handle.id] = hook
+ return handle
+
+ def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs):
+ """
+ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.
+
+ If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled
+ then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater
+ than `total_limit` then the oldest save is deleted. Each checkpoint is saved in a separate folder named
+ `checkpoint_<save_iteration>`.
+
+ Otherwise they are just saved to `output_dir`.
+
+ Should only be used when wanting to save a checkpoint during training and restoring the state in the same
+ environment.
+
+ Args:
+ output_dir (`str` or `os.PathLike`):
+ The name of the folder to save all relevant weights and states.
+ safe_serialization (`bool`, *optional*, defaults to `True`):
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ save_model_func_kwargs (`dict`, *optional*):
+ Additional keyword arguments for saving model which can be passed to the underlying save function, such
+ as optional arguments for DeepSpeed's `save_checkpoint` function.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, lr_scheduler = ...
+ >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
+ >>> accelerator.save_state(output_dir="my_checkpoint")
+ ```
+ """
+ if self.project_configuration.automatic_checkpoint_naming:
+ output_dir = os.path.join(self.project_dir, "checkpoints")
+ os.makedirs(output_dir, exist_ok=True)
+ if self.project_configuration.automatic_checkpoint_naming:
+ folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]
+ if (
+ self.project_configuration.total_limit is not None
+ and (len(folders) + 1 > self.project_configuration.total_limit)
+ and self.is_main_process
+ ):
+
+ def _inner(folder):
+ return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
+
+ folders.sort(key=_inner)
+ logger.warning(
+ f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint."
+ )
+ for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]:
+ shutil.rmtree(folder)
+ output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}")
+ if os.path.exists(output_dir):
+ raise ValueError(
+ f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with."
+ )
+ self.wait_for_everyone()
+ os.makedirs(output_dir, exist_ok=True)
+ logger.info(f"Saving current state to {output_dir}")
+
+ if self.distributed_type == DistributedType.XLA:
+ # Finish running the previous step before checkpointing
+ xm.mark_step()
+
+ # Save the models taking care of FSDP and DeepSpeed nuances
+ weights = []
+ for i, model in enumerate(self._models):
+ if self.distributed_type == DistributedType.FSDP:
+ logger.info("Saving FSDP model")
+ save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i)
+ logger.info(f"FSDP Model saved to output dir {output_dir}")
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ logger.info("Saving DeepSpeed Model and Optimizer")
+ ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}"
+ model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs)
+ logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}")
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ logger.info("Saving Megatron-LM Model, Optimizer and Scheduler")
+ model.save_checkpoint(output_dir)
+ logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}")
+ else:
+ weights.append(self.get_state_dict(model, unwrap=False))
+
+ # Save the optimizers taking care of FSDP and DeepSpeed nuances
+ optimizers = []
+ if self.distributed_type == DistributedType.FSDP:
+ for i, opt in enumerate(self._optimizers):
+ logger.info("Saving FSDP Optimizer")
+ save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i)
+ logger.info(f"FSDP Optimizer saved to output dir {output_dir}")
+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ optimizers = self._optimizers
+
+ # Save the lr schedulers taking care of DeepSpeed nuances
+ schedulers = []
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ for i, scheduler in enumerate(self._schedulers):
+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+ continue
+ schedulers.append(scheduler)
+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+ schedulers = self._schedulers
+
+ # Save the samplers of the dataloaders
+ dataloaders = self._dataloaders
+
+ # Call model saving hooks that might have been registered with
+ # accelerator.register_save_state_pre_hook
+ for hook in self._save_model_state_pre_hook.values():
+ hook(self._models, weights, output_dir)
+
+ save_location = save_accelerator_state(
+ output_dir,
+ weights,
+ optimizers,
+ schedulers,
+ dataloaders,
+ self.state.process_index,
+ self.scaler,
+ save_on_each_node=self.project_configuration.save_on_each_node,
+ safe_serialization=safe_serialization,
+ )
+ for i, obj in enumerate(self._custom_objects):
+ save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
+ self.project_configuration.iteration += 1
+ return save_location
+
+ def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
+ """
+ Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`].
+
+ Args:
+ hook (`Callable`):
+ A function to be called in [`Accelerator.load_state`] before `load_checkpoint`.
+
+ The hook should have the following signature:
+
+ `hook(models: list[torch.nn.Module], input_dir: str) -> None`
+
+ The `models` argument is the list of models as saved in the accelerator state under `accelerator._models`, and
+ the `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`].
+
+ Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load
+ configurations in addition to model weights. Can also be used to overwrite model loading with a customized
+ method. In this case, make sure to remove the already loaded models from the models list.
+
+ Returns:
+ `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
+ `handle.remove()`
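+
+ Example (a minimal sketch; the `log_models_before_load` hook below is hypothetical and only logs where the
+ state is loaded from):
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> def log_models_before_load(models, input_dir):
+ ...     print(f"Loading {len(models)} model(s) from {input_dir}")
+ >>> handle = accelerator.register_load_state_pre_hook(log_models_before_load)
+ >>> # The hook can later be removed with `handle.remove()`
+ ```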
+ """
+ handle = hooks.RemovableHandle(self._load_model_state_pre_hook)
+ self._load_model_state_pre_hook[handle.id] = hook
+ return handle
+
+ def load_state(self, input_dir: str = None, **load_model_func_kwargs):
+ """
+ Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.
+
+ Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for
+ checkpointing, it will not be loaded even if it is stored in the directory.
+
+ Args:
+ input_dir (`str` or `os.PathLike`):
+ The name of the folder all relevant weights and states were saved in. Can be `None` if
+ `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint.
+ load_model_func_kwargs (`dict`, *optional*):
+ Additional keyword arguments for loading model which can be passed to the underlying load function,
+ such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the
+ model and optimizer on.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, lr_scheduler = ...
+ >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
+ >>> accelerator.load_state("my_checkpoint")
+ ```
+ """
+ if input_dir is not None:
+ # Check if folder exists
+ input_dir = os.path.expanduser(input_dir)
+ if not os.path.isdir(input_dir):
+ raise ValueError(f"Tried to find {input_dir} but folder does not exist")
+ elif self.project_configuration.automatic_checkpoint_naming:
+ # Pick up from automatic checkpoint naming
+ input_dir = os.path.join(self.project_dir, "checkpoints")
+ folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)]
+
+ def _inner(folder):
+ return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
+
+ folders.sort(key=_inner)
+ input_dir = folders[-1]
+ else:
+ raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.")
+ logger.info(f"Loading states from {input_dir}")
+
+ # Load the models taking care of FSDP and DeepSpeed nuances
+ models = []
+ for i, model in enumerate(self._models):
+ if self.distributed_type == DistributedType.FSDP:
+ logger.info("Loading FSDP model")
+ load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i)
+ logger.info(f"FSDP Model loaded from input dir {input_dir}")
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ logger.info("Loading DeepSpeed Model and Optimizer")
+ ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}"
+ model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs)
+ logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}")
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ logger.info("Loading Megatron-LM Model, Optimizer and Scheduler")
+ model.load_checkpoint(input_dir)
+ logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}")
+ else:
+ models.append(model)
+
+ # Load the optimizers taking care of FSDP and DeepSpeed nuances
+ optimizers = []
+ if self.distributed_type == DistributedType.FSDP:
+ for i, opt in enumerate(self._optimizers):
+ logger.info("Loading FSDP Optimizer")
+ load_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], input_dir, i)
+ logger.info(f"FSDP Optimizer loaded from input dir {input_dir}")
+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ optimizers = self._optimizers
+
+ # Load the lr schedulers taking care of DeepSpeed nuances
+ schedulers = []
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ for i, scheduler in enumerate(self._schedulers):
+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+ continue
+ schedulers.append(scheduler)
+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+ schedulers = self._schedulers
+
+ dataloaders = self._dataloaders
+
+ # Call model loading hooks that might have been registered with
+ # accelerator.register_load_state_pre_hook
+ for hook in self._load_model_state_pre_hook.values():
+ hook(models, input_dir)
+
+ map_location = load_model_func_kwargs.pop("map_location", None)
+ if map_location is None:
+ if self.num_processes > 1 and self.distributed_type in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ ):
+ map_location = "on_device"
+ else:
+ map_location = "cpu"
+
+ load_accelerator_state(
+ input_dir,
+ models,
+ optimizers,
+ schedulers,
+ dataloaders,
+ self.state.process_index,
+ self.scaler,
+ map_location,
+ **load_model_func_kwargs,
+ )
+ custom_checkpoints = [
+ f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None
+ ]
+ if len(custom_checkpoints) != len(self._custom_objects):
+ err = "Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:"
+ err += f"\n\tFound checkpoints: {len(custom_checkpoints)}"
+ err += f"\n\tRegistered objects: {len(self._custom_objects)}\n"
+ err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects,"
+ err += "or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually."
+ raise RuntimeError(err)
+ else:
+ logger.info(f"Loading in {len(custom_checkpoints)} custom states")
+ for index, obj in enumerate(self._custom_objects):
+ load_custom_state(obj, input_dir, index)
+
+ def free_memory(self):
+ """
+ Releases all references to the internal objects stored and calls the garbage collector. You should call this
+ method between two trainings with different models/optimizers. It also resets `Accelerator.step` to 0.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, scheduler = ...
+ >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
+ >>> accelerator.free_memory()
+ >>> del model, optimizer, scheduler
+ ```
+ """
+ self._schedulers = []
+ self._optimizers = []
+ self._models = []
+ self._dataloaders = []
+ self.deepspeed_engine_wrapped = None
+ self.step = 0
+ release_memory()
+
+ def clear(self):
+ """
+ Alias for [`Accelerator.free_memory`]; releases all references to the internal objects stored and calls the
+ garbage collector. You should call this method between two trainings with different models/optimizers.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, scheduler = ...
+ >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
+ >>> accelerator.clear()
+ >>> del model, optimizer, scheduler
+ ```
+ """
+ self.free_memory()
+
+ def _get_named_parameters(self, *args):
+ named_parameters = {}
+ for obj in args:
+ if isinstance(obj, torch.nn.Module):
+ obj = extract_model_from_parallel(obj)
+ named_parameters.update({n: p for n, p in obj.named_parameters()})
+ return named_parameters
+
+ def _get_devices(self, *args):
+ model_device = None
+ optimizer_device = None
+ for obj in args:
+ # Loop through the model parameters and stop at the first one to record its device.
+ if isinstance(obj, torch.nn.Module):
+ for param in obj.parameters():
+ model_device = param.device
+ break
+ # Loop through the optimizer parameter groups and stop at the first one to record its device.
+ if isinstance(obj, torch.optim.Optimizer):
+ for param_group in obj.param_groups:
+ if len(param_group["params"]) > 0:
+ optimizer_device = param_group["params"][0].device
+ break
+ return (model_device, optimizer_device)
+
+ def get_state_dict(self, model, unwrap=True):
+ """
+ Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full
+ precision.
+
+ Args:
+ model (`torch.nn.Module`):
+ A PyTorch model sent through [`Accelerator.prepare`]
+ unwrap (`bool`, *optional*, defaults to `True`):
+ Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict
+
+ Returns:
+ `dict`: The state dictionary of the model potentially without full precision.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> net = torch.nn.Linear(2, 2)
+ >>> net = accelerator.prepare(net)
+ >>> state_dict = accelerator.get_state_dict(net)
+ ```
+ """
+
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ if self.deepspeed_config["zero_optimization"]["stage"] == 3:
+ if model.zero_gather_16bit_weights_on_model_save():
+ state_dict = model._zero3_consolidated_16bit_state_dict()
+ else:
+ raise ValueError(
+ "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. "
+ "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or "
+ "set `zero3_save_16bit_model` to True when using `accelerate config`. "
+ "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights."
+ )
+ else:
+ from deepspeed.checkpoint.utils import clone_tensors_for_torch_save
+
+ state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict())
+ elif self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp import FullStateDictConfig, StateDictType
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+
+ full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
+ with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
+ state_dict = model.state_dict()
+ else:
+ if unwrap:
+ model = self.unwrap_model(model)
+ state_dict = model.state_dict()
+
+ return state_dict
+
+ def register_for_checkpointing(self, *objects):
+ """
+ Makes note of `objects` and will save or load them during `save_state` or `load_state`.
+
+ These should be utilized when the state is being loaded or saved in the same script. It is not designed to be
+ used in different scripts.
+
+
+
+ Every `object` must have a `load_state_dict` and `state_dict` function to be stored.
+
+
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function.
+ >>> obj = CustomObject()
+ >>> accelerator.register_for_checkpointing(obj)
+ >>> accelerator.save_state("checkpoint.pt")
+ ```
+ """
+ invalid_objects = []
+ for obj in objects:
+ if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"):
+ invalid_objects.append(obj)
+ if len(invalid_objects) > 0:
+ err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:"
+ for index, obj in enumerate(invalid_objects):
+ err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`"
+ raise ValueError(err)
+ self._custom_objects.extend(objects)
+
+ @contextmanager
+ def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None):
+ """
+ Applies automatic mixed precision inside the block under this context manager, if it is enabled. Nothing
+ different will happen otherwise.
+
+ A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is
+ useful in blocks under `autocast` where you want to revert to fp32.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(mixed_precision="fp16")
+ >>> with accelerator.autocast():
+ ... train()
+ ```
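+
+ To locally disable mixed precision for a block, a different handler can be passed in (a sketch assuming
+ `AutocastKwargs(enabled=False)`, mirroring `torch.autocast`; `compute_loss_in_full_precision` is an
+ illustrative placeholder):
+
+ ```python
+ >>> from accelerate.utils import AutocastKwargs
+
+ >>> with accelerator.autocast(autocast_handler=AutocastKwargs(enabled=False)):
+ ...     loss = compute_loss_in_full_precision()
+ ```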
+ """
+ if cache_enabled:
+ warnings.warn(
+ "Passing `cache_enabled=True` to `accelerator.autocast` is deprecated and will be removed in v0.23.0. "
+ "Please use the `AutocastKwargs` class instead and pass it to the `Accelerator` as a `kwarg_handler`.",
+ FutureWarning,
+ )
+ if self.autocast_handler is not None:
+ self.autocast_handler.cache_enabled = True
+ else:
+ self.autocast_handler = AutocastKwargs(cache_enabled=True)
+ if autocast_handler is None:
+ autocast_handler = self.autocast_handler
+ autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler)
+ autocast_context.__enter__()
+ try:
+ yield
+ finally:
+ autocast_context.__exit__(*sys.exc_info())
+
+ @property
+ def optimizer_step_was_skipped(self):
+ """
+ Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which
+ case the learning rate should not be changed.
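+
+ Example (a minimal sketch of the usual pattern: only step the scheduler when the optimizer actually stepped):
+
+ ```python
+ >>> optimizer.step()
+ >>> if not accelerator.optimizer_step_was_skipped:
+ ...     lr_scheduler.step()
+ ```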
+ """
+ for optimizer in self._optimizers:
+ if optimizer.step_was_skipped:
+ return True
+ return False
+
+ def skip_first_batches(self, dataloader, num_batches: int = 0):
+ """
+ Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
+
+ Args:
+ dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches.
+ num_batches (`int`, *optional*, defaults to 0): The number of batches to skip
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+ >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2)
+ >>> # for the first epoch only
+ >>> for input, target in skipped_dataloader:
+ ... optimizer.zero_grad()
+ ... output = model(input)
+ ... loss = loss_func(output, target)
+ ... accelerator.backward(loss)
+ ... optimizer.step()
+
+ >>> # subsequent epochs
+ >>> for input, target in dataloader:
+ ... optimizer.zero_grad()
+ ... ...
+ ```
+ """
+ return skip_first_batches(dataloader, num_batches=num_batches)
+
+ def __deepcopy__(self, memo):
+ logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.")
+ return self
+
+ def verify_device_map(self, model: torch.nn.Module) -> bool:
+ """
+ Checks whether `model` has been prepared with big model inference (e.g. with a device map resembling `auto`),
+ returning `True` if a multi-device `hf_device_map` is found.
+ """
+ # Checks if any of the child modules has the attribute `hf_device_map` and this map has more than one entry.
+ for m in model.modules():
+ if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1:
+ return True
+
+ return False
diff --git a/venv/lib/python3.10/site-packages/accelerate/big_modeling.py b/venv/lib/python3.10/site-packages/accelerate/big_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..94febb5d3dde35689d99ebbf26c1c78346f0ab17
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/big_modeling.py
@@ -0,0 +1,627 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+from contextlib import contextmanager
+from functools import wraps
+from typing import Dict, List, Optional, Union
+
+import torch
+import torch.nn as nn
+
+from .hooks import (
+ AlignDevicesHook,
+ CpuOffload,
+ UserCpuOffloadHook,
+ add_hook_to_module,
+ attach_align_device_hook,
+ attach_align_device_hook_on_blocks,
+)
+from .utils import (
+ OffloadedWeightsLoader,
+ check_cuda_p2p_ib_support,
+ check_device_map,
+ extract_submodules_state_dict,
+ find_tied_parameters,
+ get_balanced_memory,
+ infer_auto_device_map,
+ is_mlu_available,
+ is_npu_available,
+ is_torch_version,
+ is_xpu_available,
+ load_checkpoint_in_model,
+ offload_state_dict,
+ parse_flag_from_env,
+ retie_parameters,
+)
+from .utils.other import recursive_getattr
+
+
+logger = logging.getLogger(__name__)
+
+
+@contextmanager
+def init_empty_weights(include_buffers: bool = None):
+ """
+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an
+ empty model. Useful when just initializing the model would blow the available RAM.
+
+ Args:
+ include_buffers (`bool`, *optional*):
+ Whether or not to also put all buffers on the meta device while initializing.
+
+ Example:
+
+ ```python
+ import torch.nn as nn
+ from accelerate import init_empty_weights
+
+ # Initialize a model with 100 billion parameters in no time and without using any RAM.
+ with init_empty_weights():
+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
+ ```
+
+
+
+ Any model created under this context manager has no weights. As such you can't do something like
+ `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
+ Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
+ called.
+
+
+ """
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+ with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
+ yield f
+
+
+@contextmanager
+def init_on_device(device: torch.device, include_buffers: bool = None):
+ """
+ A context manager under which models are initialized with all parameters on the specified device.
+
+ Args:
+ device (`torch.device`):
+ Device to initialize all parameters on.
+ include_buffers (`bool`, *optional*):
+ Whether or not to also put all buffers on the meta device while initializing.
+
+ Example:
+
+ ```python
+ import torch.nn as nn
+ from accelerate import init_on_device
+
+ with init_on_device(device=torch.device("cuda")):
+ tst = nn.Linear(100, 100)  # on `cuda` device
+ ```
+ """
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+
+ # TODO(shingjan): remove the torch version check once older versions are deprecated
+ if is_torch_version(">=", "2.0") and include_buffers:
+ with device:
+ yield
+ return
+
+ old_register_parameter = nn.Module.register_parameter
+ if include_buffers:
+ old_register_buffer = nn.Module.register_buffer
+
+ def register_empty_parameter(module, name, param):
+ old_register_parameter(module, name, param)
+ if param is not None:
+ param_cls = type(module._parameters[name])
+ kwargs = module._parameters[name].__dict__
+ kwargs["requires_grad"] = param.requires_grad
+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+ def register_empty_buffer(module, name, buffer, persistent=True):
+ old_register_buffer(module, name, buffer, persistent=persistent)
+ if buffer is not None:
+ module._buffers[name] = module._buffers[name].to(device)
+
+ # Patch tensor creation
+ if include_buffers:
+ tensor_constructors_to_patch = {
+ torch_function_name: getattr(torch, torch_function_name)
+ for torch_function_name in ["empty", "zeros", "ones", "full"]
+ }
+ else:
+ tensor_constructors_to_patch = {}
+
+ def patch_tensor_constructor(fn):
+ def wrapper(*args, **kwargs):
+ kwargs["device"] = device
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ try:
+ nn.Module.register_parameter = register_empty_parameter
+ if include_buffers:
+ nn.Module.register_buffer = register_empty_buffer
+ for torch_function_name in tensor_constructors_to_patch.keys():
+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+ yield
+ finally:
+ nn.Module.register_parameter = old_register_parameter
+ if include_buffers:
+ nn.Module.register_buffer = old_register_buffer
+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+ setattr(torch, torch_function_name, old_torch_function)
+
+
+def cpu_offload(
+ model: nn.Module,
+ execution_device: Optional[torch.device] = None,
+ offload_buffers: bool = False,
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+):
+ """
+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
+ state dict and put on the execution device passed as they are needed, then offloaded again.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to offload.
+ execution_device (`torch.device`, *optional*):
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+ model's first parameter device.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
+ The state dict of the model that will be kept on CPU.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
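+
+ Example (a minimal sketch; the toy model and the CUDA execution device are illustrative):
+
+ ```python
+ >>> import torch
+ >>> import torch.nn as nn
+ >>> from accelerate import cpu_offload
+
+ >>> model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
+ >>> model = cpu_offload(model, execution_device=torch.device("cuda"))
+ >>> # Weights stay on the CPU and are moved to the GPU only while each layer runs its forward pass.
+ >>> out = model(torch.randn(4, 8, device="cuda"))
+ ```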
+ """
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ if state_dict is None:
+ state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
+
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(
+ model,
+ execution_device=execution_device,
+ offload=True,
+ offload_buffers=offload_buffers,
+ weights_map=state_dict,
+ preload_module_classes=preload_module_classes,
+ )
+
+ return model
+
+
+def cpu_offload_with_hook(
+ model: torch.nn.Module,
+ execution_device: Optional[Union[int, str, torch.device]] = None,
+ prev_module_hook: Optional[UserCpuOffloadHook] = None,
+):
+ """
+ Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
+ [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
+ the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to offload.
+ execution_device(`str`, `int` or `torch.device`, *optional*):
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
+ GPU 0 if there is a GPU, and finally to the CPU.
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
+ The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
+ offload method will be called just before the forward of the model to which this hook is attached.
+
+ Example:
+
+ ```py
+ model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
+ model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
+ model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
+
+ hid_1 = model_1(input)
+ for i in range(50):
+ # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
+ hid_2 = model_2(hid_1)
+ # model2 is offloaded to the CPU just before this forward.
+ hid_3 = model_3(hid_2)
+
+ # For model3, you need to manually call the hook offload method.
+ hook_3.offload()
+ ```
+ """
+ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
+ add_hook_to_module(model, hook, append=True)
+ user_hook = UserCpuOffloadHook(model, hook)
+ return model, user_hook
+
+
+def disk_offload(
+ model: nn.Module,
+ offload_dir: Union[str, os.PathLike],
+ execution_device: Optional[torch.device] = None,
+ offload_buffers: bool = False,
+ preload_module_classes: Optional[List[str]] = None,
+):
+ """
+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
+ put on the execution device passed as they are needed, then offloaded again.
+
+ Args:
+ model (`torch.nn.Module`): The model to offload.
+ offload_dir (`str` or `os.PathLike`):
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
+ execution_device (`torch.device`, *optional*):
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+ model's first parameter device.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
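+
+ Example (a minimal sketch; the folder name, toy model, and CUDA execution device are illustrative):
+
+ ```python
+ >>> import torch
+ >>> import torch.nn as nn
+ >>> from accelerate import disk_offload
+
+ >>> model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
+ >>> model = disk_offload(model, offload_dir="offloaded_weights", execution_device=torch.device("cuda"))
+ >>> out = model(torch.randn(4, 8, device="cuda"))
+ ```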
+ """
+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
+ offload_state_dict(offload_dir, model.state_dict())
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
+
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(
+ model,
+ execution_device=execution_device,
+ offload=True,
+ offload_buffers=offload_buffers,
+ weights_map=weights_map,
+ preload_module_classes=preload_module_classes,
+ )
+
+ return model
+
+
+def dispatch_model(
+ model: nn.Module,
+ device_map: Dict[str, Union[str, int, torch.device]],
+ main_device: Optional[torch.device] = None,
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
+ offload_dir: Optional[Union[str, os.PathLike]] = None,
+ offload_index: Optional[Dict[str, str]] = None,
+ offload_buffers: bool = False,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ force_hooks: bool = False,
+):
+ """
+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
+ the CPU or even the disk.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to dispatch.
+ device_map (`Dict[str, Union[str, int, torch.device]]`):
+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that
+ `"disk"` is accepted even if it's not a proper value for `torch.device`.
+ main_device (`str`, `int` or `torch.device`, *optional*):
+ The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
+ `"disk"`.
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
+ The state dict of the part of the model that will be kept on CPU.
+ offload_dir (`str` or `os.PathLike`):
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
+ offload_index (`Dict`, *optional*):
+ A dictionary from weight name to its information (`dtype`/`shape` or safetensors filename). Will default
+ to the index saved in `save_folder`.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ force_hooks (`bool`, *optional*, defaults to `False`):
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+ single device.
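+
+ Example (a minimal sketch; the toy model and hand-written device map are illustrative):
+
+ ```python
+ >>> import torch.nn as nn
+ >>> from accelerate import dispatch_model
+
+ >>> model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
+ >>> # Keep the first linear layer on GPU 0 and the rest on the CPU.
+ >>> device_map = {"0": 0, "1": "cpu", "2": "cpu"}
+ >>> model = dispatch_model(model, device_map=device_map)
+ ```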
+ """
+ # Error early if the device map is incomplete.
+ check_device_map(model, device_map)
+
+ # for backward compatibility
+ is_bnb_quantized = (
+ getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
+ ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
+
+ # We attach hooks if the device_map has at least 2 different devices or if
+ # force_hooks is set to `True`. Otherwise, the model is already loaded
+ # on the unique device and the user can decide where to dispatch the model.
+ # If the model is quantized, we always force-dispatch the model
+ if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
+ if main_device is None:
+ if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
+ main_device = "cpu"
+ else:
+ main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
+
+ if main_device != "cpu":
+ cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
+ if state_dict is None and len(cpu_modules) > 0:
+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
+
+ disk_modules = [name for name, device in device_map.items() if device == "disk"]
+ if offload_dir is None and offload_index is None and len(disk_modules) > 0:
+ raise ValueError(
+ "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
+ f"need to be offloaded: {', '.join(disk_modules)}."
+ )
+ if (
+ len(disk_modules) > 0
+ and offload_index is None
+ and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
+ ):
+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
+ offload_state_dict(offload_dir, disk_state_dict)
+
+ execution_device = {
+ name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
+ }
+ execution_device[""] = main_device
+ offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
+ offload = {name: device in offloaded_devices for name, device in device_map.items()}
+ save_folder = offload_dir if len(disk_modules) > 0 else None
+ if state_dict is not None or save_folder is not None or offload_index is not None:
+ device = main_device if offload_index is not None else None
+ weights_map = OffloadedWeightsLoader(
+ state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
+ )
+ else:
+ weights_map = None
+
+ # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
+ # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
+ # original pointer) on each device.
+ tied_params = find_tied_parameters(model)
+
+ tied_params_map = {}
+ for group in tied_params:
+ for param_name in group:
+ # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
+ # to care about views of tensors through storage_offset.
+ data_ptr = recursive_getattr(model, param_name).data_ptr()
+ tied_params_map[data_ptr] = {}
+
+ # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
+
+ attach_align_device_hook_on_blocks(
+ model,
+ execution_device=execution_device,
+ offload=offload,
+ offload_buffers=offload_buffers,
+ weights_map=weights_map,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ tied_params_map=tied_params_map,
+ )
+
+ # Warn if any parameters are on the meta device
+ offloaded_devices_str = " and ".join(
+ [device for device in set(device_map.values()) if device in ("cpu", "disk")]
+ )
+ if len(offloaded_devices_str) > 0:
+ logger.warning(
+ f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
+ )
+
+ # Attaching the hook may break tied weights, so we retie them
+ retie_parameters(model, tied_params)
+
+ # add warning to cuda and to method
+ def add_warning(fn, model):
+ @wraps(fn)
+ def wrapper(*args, **kwargs):
+ warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
+ if str(fn.__name__) == "to":
+ to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
+ if to_device is not None:
+ logger.warning(warning_msg)
+ else:
+ logger.warning(warning_msg)
+ for param in model.parameters():
+ if param.device == torch.device("meta"):
+ raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ model.to = add_warning(model.to, model)
+ if is_npu_available():
+ model.npu = add_warning(model.npu, model)
+ elif is_mlu_available():
+ model.mlu = add_warning(model.mlu, model)
+ elif is_xpu_available():
+ model.xpu = add_warning(model.xpu, model)
+ else:
+ model.cuda = add_warning(model.cuda, model)
+
+ # Check if we are using multi-gpus with RTX 4000 series
+ use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
+ if use_multi_gpu and not check_cuda_p2p_ib_support():
+ logger.warning(
+ "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
+ "This can affect the multi-gpu inference when using accelerate device_map."
+ "Please make sure to update your driver to the latest version which resolves this."
+ )
+ else:
+ device = list(device_map.values())[0]
+ # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
+ if is_npu_available() and isinstance(device, int):
+ device = f"npu:{device}"
+ elif is_mlu_available() and isinstance(device, int):
+ device = f"mlu:{device}"
+ elif is_xpu_available() and isinstance(device, int):
+ device = f"xpu:{device}"
+ if device != "disk":
+ model.to(device)
+ else:
+ raise ValueError(
+ "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
+ )
+ # Convert OrderedDict back to dict for easier usage
+ model.hf_device_map = dict(device_map)
+ return model
+
+
+def load_checkpoint_and_dispatch(
+ model: nn.Module,
+ checkpoint: Union[str, os.PathLike],
+ device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+ no_split_module_classes: Optional[List[str]] = None,
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
+ offload_buffers: bool = False,
+ dtype: Optional[Union[str, torch.dtype]] = None,
+ offload_state_dict: Optional[bool] = None,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ force_hooks: bool = False,
+ strict: bool = False,
+):
+ """
+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
+ loaded and adds the various hooks that will make this model run properly (even if split across devices).
+
+ Args:
+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.
+ checkpoint (`str` or `os.PathLike`):
+ The folder checkpoint to load. It can be:
+ - a path to a file containing a whole model state dict
+ - a path to a `.json` file containing the index to a sharded checkpoint
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+ name; once a given module name is included, every submodule of it will be sent to the same device.
+
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
+ information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map).
+ Defaults to None, which means [`dispatch_model`] will not be called.
+ max_memory (`Dict`, *optional*):
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU
+ and the available CPU RAM if unset.
+ no_split_module_classes (`List[str]`, *optional*):
+ A list of layer class names that should never be split across device (for instance any layer that has a
+ residual connection).
+ offload_folder (`str` or `os.PathLike`, *optional*):
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
+ well as the parameters.
+ dtype (`str` or `torch.dtype`, *optional*):
+ If provided, the weights will be converted to that type when loaded.
+ offload_state_dict (`bool`, *optional*):
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
+ the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map
+ picked contains `"disk"` values.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ force_hooks (`bool`, *optional*, defaults to `False`):
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+ single device.
+ strict (`bool`, *optional*, defaults to `False`):
+ Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
+ state_dict.
+
+ Example:
+
+ ```python
+ >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
+ >>> from huggingface_hub import hf_hub_download
+ >>> from transformers import AutoConfig, AutoModelForCausalLM
+
+ >>> # Download the Weights
+ >>> checkpoint = "EleutherAI/gpt-j-6B"
+ >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
+
+ >>> # Create a model and initialize it with empty weights
+ >>> config = AutoConfig.from_pretrained(checkpoint)
+ >>> with init_empty_weights():
+ ... model = AutoModelForCausalLM.from_config(config)
+
+ >>> # Load the checkpoint and dispatch it to the right devices
+ >>> model = load_checkpoint_and_dispatch(
+ ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
+ ... )
+ ```
+ """
+ if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
+ raise ValueError(
+ "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
+ "'sequential'."
+ )
+ if isinstance(device_map, str):
+ if device_map != "sequential":
+ max_memory = get_balanced_memory(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ dtype=dtype,
+ low_zero=(device_map == "balanced_low_0"),
+ )
+ device_map = infer_auto_device_map(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ dtype=dtype,
+ offload_buffers=offload_buffers,
+ )
+ if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
+ offload_state_dict = True
+ load_checkpoint_in_model(
+ model,
+ checkpoint,
+ device_map=device_map,
+ offload_folder=offload_folder,
+ dtype=dtype,
+ offload_state_dict=offload_state_dict,
+ offload_buffers=offload_buffers,
+ strict=strict,
+ )
+ if device_map is None:
+ return model
+ return dispatch_model(
+ model,
+ device_map=device_map,
+ offload_dir=offload_folder,
+ offload_buffers=offload_buffers,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ force_hooks=force_hooks,
+ )
diff --git a/venv/lib/python3.10/site-packages/accelerate/checkpointing.py b/venv/lib/python3.10/site-packages/accelerate/checkpointing.py
new file mode 100644
index 0000000000000000000000000000000000000000..307eca49d7c93dcbf450c3de5f5f356e11db1d51
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/checkpointing.py
@@ -0,0 +1,275 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+from pathlib import Path
+from typing import List
+
+import numpy as np
+import torch
+from safetensors.torch import load_file
+from torch.cuda.amp import GradScaler
+
+from .utils import (
+ MODEL_NAME,
+ OPTIMIZER_NAME,
+ RNG_STATE_NAME,
+ SAFE_MODEL_NAME,
+ SAFE_WEIGHTS_NAME,
+ SAMPLER_NAME,
+ SCALER_NAME,
+ SCHEDULER_NAME,
+ WEIGHTS_NAME,
+ get_pretty_name,
+ is_torch_xla_available,
+ is_xpu_available,
+ save,
+)
+
+
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+from .logging import get_logger
+from .state import PartialState
+
+
+logger = get_logger(__name__)
+
+
+def save_accelerator_state(
+ output_dir: str,
+ model_states: List[dict],
+ optimizers: list,
+ schedulers: list,
+ dataloaders: list,
+ process_index: int,
+ scaler: GradScaler = None,
+ save_on_each_node: bool = False,
+ safe_serialization: bool = True,
+):
+ """
+ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
+
+
+
+ If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
+ `pickle`.
+
+
+
+ Args:
+ output_dir (`str` or `os.PathLike`):
+ The name of the folder to save all relevant weights and states.
+ model_states (`List[dict]`):
+ A list of model state dicts
+ optimizers (`List[torch.optim.Optimizer]`):
+ A list of optimizer instances
+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
+ A list of learning rate schedulers
+ dataloaders (`List[torch.utils.data.DataLoader]`):
+ A list of dataloader instances to save their sampler states
+ process_index (`int`):
+ The current process index in the Accelerator state
+ scaler (`torch.cuda.amp.GradScaler`, *optional*):
+ An optional gradient scaler instance to save
+ save_on_each_node (`bool`, *optional*):
+ Whether to save on every node, or only the main node.
+ safe_serialization (`bool`, *optional*, defaults to `True`):
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
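+
+ Example (in normal use this function is not called directly; `Accelerator.save_state` delegates to it):
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer = ...
+ >>> model, optimizer = accelerator.prepare(model, optimizer)
+ >>> accelerator.save_state("my_checkpoint")  # calls `save_accelerator_state` under the hood
+ ```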
+ """
+ output_dir = Path(output_dir)
+ # Model states
+ for i, state in enumerate(model_states):
+ weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
+ if i > 0:
+ weights_name = weights_name.replace(".", f"_{i}.")
+ output_model_file = output_dir.joinpath(weights_name)
+ save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
+ logger.info(f"Model weights saved in {output_model_file}")
+ # Optimizer states
+ for i, opt in enumerate(optimizers):
+ state = opt.state_dict()
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+ output_optimizer_file = output_dir.joinpath(optimizer_name)
+ save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f"Optimizer state saved in {output_optimizer_file}")
+ # Scheduler states
+ for i, scheduler in enumerate(schedulers):
+ state = scheduler.state_dict()
+ scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
+ output_scheduler_file = output_dir.joinpath(scheduler_name)
+ save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f"Scheduler state saved in {output_scheduler_file}")
+ # DataLoader states
+ for i, dataloader in enumerate(dataloaders):
+ sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
+ output_sampler_file = output_dir.joinpath(sampler_name)
+ # Only save if we have our custom sampler
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
+
+ if isinstance(dataloader.dataset, IterableDatasetShard):
+ sampler = dataloader.sampler.sampler
+
+ if isinstance(sampler, SeedableRandomSampler):
+ save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
+
+ # GradScaler state
+ if scaler is not None:
+ state = scaler.state_dict()
+ output_scaler_file = output_dir.joinpath(SCALER_NAME)
+ torch.save(state, output_scaler_file)
+ logger.info(f"Gradient scaler state saved in {output_scaler_file}")
+ # Random number generator states
+ states = {}
+ states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
+ states["random_state"] = random.getstate()
+ states["numpy_random_seed"] = np.random.get_state()
+ states["torch_manual_seed"] = torch.get_rng_state()
+ if is_xpu_available():
+ states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
+ else:
+ states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
+ if is_torch_xla_available():
+ states["xm_seed"] = xm.get_rng_state()
+ output_states_file = output_dir.joinpath(states_name)
+ torch.save(states, output_states_file)
+ logger.info(f"Random states saved in {output_states_file}")
+ return output_dir
+
+
+def load_accelerator_state(
+ input_dir,
+ models,
+ optimizers,
+ schedulers,
+ dataloaders,
+ process_index,
+ scaler=None,
+ map_location=None,
+ **load_model_func_kwargs,
+):
+ """
+ Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
+
+ Args:
+ input_dir (`str` or `os.PathLike`):
+ The name of the folder to load all relevant weights and states.
+ models (`List[torch.nn.Module]`):
+ A list of model instances
+ optimizers (`List[torch.optim.Optimizer]`):
+ A list of optimizer instances
+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
+ A list of learning rate schedulers
+ process_index (`int`):
+ The current process index in the Accelerator state
+ scaler (`torch.cuda.amp.GradScaler`, *optional*):
+ An optional *GradScaler* instance to load
+ map_location (`str`, *optional*):
+ What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
+ load_model_func_kwargs (`dict`, *optional*):
+ Additional arguments that can be passed to the model's `load_state_dict` method.
+ """
+ if map_location not in [None, "cpu", "on_device"]:
+ raise TypeError(
+ "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
+ )
+ if map_location is None:
+ map_location = "cpu"
+ elif map_location == "on_device":
+ map_location = PartialState().device
+
+ input_dir = Path(input_dir)
+ # Model states
+ for i, model in enumerate(models):
+ ending = f"_{i}" if i > 0 else ""
+ input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
+ if input_model_file.exists():
+ state_dict = load_file(input_model_file, device=str(map_location))
+ else:
+ # Load with torch
+ input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
+ state_dict = torch.load(input_model_file, map_location=map_location)
+ models[i].load_state_dict(state_dict, **load_model_func_kwargs)
+ logger.info("All model weights loaded successfully")
+
+ # Optimizer states
+ for i, opt in enumerate(optimizers):
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+ input_optimizer_file = input_dir.joinpath(optimizer_name)
+ optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
+ optimizers[i].load_state_dict(optimizer_state)
+ logger.info("All optimizer states loaded successfully")
+
+ # Scheduler states
+ for i, scheduler in enumerate(schedulers):
+ scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
+ input_scheduler_file = input_dir.joinpath(scheduler_name)
+ scheduler.load_state_dict(torch.load(input_scheduler_file))
+ logger.info("All scheduler states loaded successfully")
+
+ for i, dataloader in enumerate(dataloaders):
+ sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
+ input_sampler_file = input_dir.joinpath(sampler_name)
+ # Only load if we have our custom sampler
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
+
+ if isinstance(dataloader.dataset, IterableDatasetShard):
+ sampler = dataloader.sampler.sampler
+
+ if isinstance(sampler, SeedableRandomSampler):
+ dataloader.sampler.sampler = torch.load(input_sampler_file)
+ logger.info("All dataloader sampler states loaded successfully")
+
+ # GradScaler state
+ if scaler is not None:
+ input_scaler_file = input_dir.joinpath(SCALER_NAME)
+ scaler.load_state_dict(torch.load(input_scaler_file))
+ logger.info("GradScaler state loaded successfully")
+
+ # Random states
+ try:
+ states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
+ random.setstate(states["random_state"])
+ np.random.set_state(states["numpy_random_seed"])
+ torch.set_rng_state(states["torch_manual_seed"])
+ if is_xpu_available():
+ torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
+ else:
+ torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
+ if is_torch_xla_available():
+ xm.set_rng_state(states["xm_seed"])
+ logger.info("All random states loaded successfully")
+ except Exception:
+ logger.info("Could not load random states")
+
+
+def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
+ """
+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
+ """
+ # TODO: check whether this is the right way to get a qualified-name-like value from `obj`.
+ save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
+ logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
+ save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
+
+
+def load_custom_state(obj, path, index: int = 0):
+ """
+ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
+ """
+ load_location = f"{path}/custom_checkpoint_{index}.pkl"
+ logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}")
+ obj.load_state_dict(torch.load(load_location, map_location="cpu"))
diff --git a/venv/lib/python3.10/site-packages/accelerate/data_loader.py b/venv/lib/python3.10/site-packages/accelerate/data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..0764e0971a3845d04dc1c7fc500d0c06f67d2c0e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/data_loader.py
@@ -0,0 +1,1093 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from contextlib import suppress
+from typing import Callable, List, Optional, Union
+
+import torch
+from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
+
+from .logging import get_logger
+from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available
+from .utils import (
+ RNGType,
+ broadcast,
+ broadcast_object_list,
+ concatenate,
+ find_batch_size,
+ get_data_structure,
+ initialize_tensors,
+ is_torch_version,
+ send_to_device,
+ slice_tensors,
+ synchronize_rng_states,
+)
+
+
+logger = get_logger(__name__)
+
+# kwargs of the DataLoader in min version 1.4.0.
+_PYTORCH_DATALOADER_KWARGS = {
+ "batch_size": 1,
+ "shuffle": False,
+ "sampler": None,
+ "batch_sampler": None,
+ "num_workers": 0,
+ "collate_fn": None,
+ "pin_memory": False,
+ "drop_last": False,
+ "timeout": 0,
+ "worker_init_fn": None,
+ "multiprocessing_context": None,
+ "generator": None,
+ "prefetch_factor": 2,
+ "persistent_workers": False,
+}
+
+# kwargs added after by version
+_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}
+
+for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
+ if is_torch_version(">=", v):
+ _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
+
+
+class SeedableRandomSampler(RandomSampler):
+ """
+ Same as a random sampler, except that in `__iter__` a seed can be used.
+
+ Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
+ and be fully reproducible on multiple iterations.
+
+ If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
+ (stored in `self.epoch`).
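+
+ Example (a minimal sketch):
+
+ ```python
+ >>> sampler = SeedableRandomSampler(range(10))
+ >>> sampler.set_epoch(0)  # every process seeds identically, so all shards see the same shuffling order
+ >>> first_epoch_order = list(sampler)
+ ```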
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.epoch = 0
+ self.initial_seed = torch.random.initial_seed()
+
+ def __iter__(self):
+ if self.generator is None:
+ self.generator = torch.Generator()
+ self.generator.manual_seed(self.initial_seed)
+
+ # Allow `self.epoch` to modify the seed of the generator
+ seed = self.epoch + self.initial_seed
+ # print("Setting seed at epoch", self.epoch, seed)
+ self.generator.manual_seed(seed)
+ yield from super().__iter__()
+ self.set_epoch(self.epoch + 1)
+
+ def set_epoch(self, epoch: int):
+ "Sets the current iteration of the sampler."
+ self.epoch = epoch
+
+
+class BatchSamplerShard(BatchSampler):
+ """
+ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
+ always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
+ Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
+
+ Args:
+ batch_sampler (`torch.utils.data.sampler.BatchSampler`):
+ The batch sampler to split in several shards.
+ num_processes (`int`, *optional*, defaults to 1):
+ The number of processes running concurrently.
+ process_index (`int`, *optional*, defaults to 0):
+ The index of the current process.
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
+ yielding different full batches on each process.
+
+ On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:
+
+ - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if
+ this argument is set to `False`.
+ - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`
+ then `[6, 7]` if this argument is set to `True`.
+ even_batches (`bool`, *optional*, defaults to `True`):
+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round
+ multiple of (original batch size / number of processes).
+
+
+
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
+ equal to `False`
+
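+ Example (a minimal sketch mirroring the two-process description above):
+
+ ```python
+ >>> from torch.utils.data import BatchSampler, SequentialSampler
+
+ >>> batch_sampler = BatchSampler(SequentialSampler(range(8)), batch_size=4, drop_last=False)
+ >>> list(BatchSamplerShard(batch_sampler, num_processes=2, process_index=0))
+ [[0, 1, 2, 3]]
+ >>> list(BatchSamplerShard(batch_sampler, num_processes=2, process_index=0, split_batches=True))
+ [[0, 1], [4, 5]]
+ ```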
+ """
+
+ def __init__(
+ self,
+ batch_sampler: BatchSampler,
+ num_processes: int = 1,
+ process_index: int = 0,
+ split_batches: bool = False,
+ even_batches: bool = True,
+ ):
+ if split_batches and batch_sampler.batch_size % num_processes != 0:
+ raise ValueError(
+ f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) "
+ f"needs to be a round multiple of the number of processes ({num_processes})."
+ )
+ self.batch_sampler = batch_sampler
+ self.num_processes = num_processes
+ self.process_index = process_index
+ self.split_batches = split_batches
+ self.even_batches = even_batches
+ self.batch_size = getattr(batch_sampler, "batch_size", None)
+ self.drop_last = getattr(batch_sampler, "drop_last", False)
+ if self.batch_size is None and self.even_batches:
+ raise ValueError(
+ "You need to use `even_batches=False` when the batch sampler has no batch size. If you "
+ "are not calling this method directly, set `accelerator.even_batches=False` instead."
+ )
+
+ @property
+ def total_length(self):
+ return len(self.batch_sampler)
+
+ def __len__(self):
+ if self.split_batches:
+ # Split batches does not change the length of the batch sampler
+ return len(self.batch_sampler)
+ if len(self.batch_sampler) % self.num_processes == 0:
+ # If the length is a round multiple of the number of processes, it's easy.
+ return len(self.batch_sampler) // self.num_processes
+ length = len(self.batch_sampler) // self.num_processes
+ if self.drop_last:
+ # Same if we drop the remainder.
+ return length
+ elif self.even_batches:
+ # When we even batches we always get +1
+ return length + 1
+ else:
+ # Otherwise it depends on the process index.
+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
+
+ def __iter__(self):
+ return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
+
+ def _iter_with_split(self):
+ initial_data = []
+ batch_length = self.batch_sampler.batch_size // self.num_processes
+ for idx, batch in enumerate(self.batch_sampler):
+ if idx == 0:
+ initial_data = batch
+ if len(batch) == self.batch_size:
+ # If the batch is full, we yield the part of it this process is responsible of.
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+
+ # If drop_last is True or the last batch was full, iteration is over, otherwise...
+ if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
+ if not self.even_batches:
+ if len(batch) > batch_length * self.process_index:
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+ else:
+ # For degenerate cases where the dataset has less than num_process * batch_size samples
+ while len(initial_data) < self.batch_size:
+ initial_data += initial_data
+ batch = batch + initial_data
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+
+ def _iter_with_no_split(self):
+ initial_data = []
+ batch_to_yield = []
+ for idx, batch in enumerate(self.batch_sampler):
+ # We gather the initial indices in case we need to circle back at the end.
+ if not self.drop_last and idx < self.num_processes:
+ initial_data += batch
+ # We identify the batch to yield but wait until we are sure every process gets a full batch before actually
+ # yielding it.
+ if idx % self.num_processes == self.process_index:
+ batch_to_yield = batch
+ if idx % self.num_processes == self.num_processes - 1 and (
+ self.batch_size is None or len(batch) == self.batch_size
+ ):
+ yield batch_to_yield
+ batch_to_yield = []
+
+ # If drop_last is True, iteration is over, otherwise...
+ if not self.drop_last and len(initial_data) > 0:
+ if not self.even_batches:
+ if len(batch_to_yield) > 0:
+ yield batch_to_yield
+ else:
+ # ... we yield the complete batch we had saved before if it has the proper length
+ if len(batch_to_yield) == self.batch_size:
+ yield batch_to_yield
+
+ # For degenerate cases where the dataset has fewer than num_processes * batch_size samples
+ while len(initial_data) < self.num_processes * self.batch_size:
+ initial_data += initial_data
+
+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
+ if len(batch) == self.batch_size:
+ batch = []
+ idx += 1
+
+ # Make sure we yield a multiple of self.num_processes batches
+ cycle_index = 0
+ while idx % self.num_processes != 0 or len(batch) > 0:
+ end_index = cycle_index + self.batch_size - len(batch)
+ batch += initial_data[cycle_index:end_index]
+ if idx % self.num_processes == self.process_index:
+ yield batch
+ cycle_index = end_index
+ batch = []
+ idx += 1
+
+
+class IterableDatasetShard(IterableDataset):
+ """
+ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
+ always yield a number of samples that is a round multiple of the actual batch size (depending on the value of
+ `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
+ `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
+ be too small or loop with indices from the beginning.
+
+ Args:
+ dataset (`torch.utils.data.dataset.IterableDataset`):
+ The dataset to split into several shards.
+ batch_size (`int`, *optional*, defaults to 1):
+ The size of the batches per shard (if `split_batches=False`) or the size of the batches (if
+ `split_batches=True`).
+ drop_last (`bool`, *optional*, defaults to `False`):
+ Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
+ beginning.
+ num_processes (`int`, *optional*, defaults to 1):
+ The number of processes running concurrently.
+ process_index (`int`, *optional*, defaults to 0):
+ The index of the current process.
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
+ yielding different full batches on each process.
+
+ On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
+
+ - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
+ argument is set to `False`.
+ - the shard on process 0 to yield `[0, 1, 4, 5]` and the shard on process 1 to yield `[2, 3, 6, 7]` if
+ this argument is set to `True`.
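+
+ Example (an illustrative sketch): sharding a minimal `IterableDataset` of 10 integers across two processes with
+ `batch_size=2`; the incomplete last batch is completed with samples from the beginning. `SimpleIterable` is a
+ hypothetical helper written here for the example, not part of the library.
+
+ ```python
+ >>> from torch.utils.data import IterableDataset
+ >>> class SimpleIterable(IterableDataset):
+ ...     def __init__(self, n):
+ ...         self.n = n
+ ...     def __iter__(self):
+ ...         yield from range(self.n)
+ >>> list(IterableDatasetShard(SimpleIterable(10), batch_size=2, num_processes=2, process_index=0))
+ [0, 1, 4, 5, 8, 9]
+ >>> list(IterableDatasetShard(SimpleIterable(10), batch_size=2, num_processes=2, process_index=1))
+ [2, 3, 6, 7, 0, 1]
+ ```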
+ """
+
+ def __init__(
+ self,
+ dataset: IterableDataset,
+ batch_size: int = 1,
+ drop_last: bool = False,
+ num_processes: int = 1,
+ process_index: int = 0,
+ split_batches: bool = False,
+ ):
+ if split_batches and batch_size > 1 and batch_size % num_processes != 0:
+ raise ValueError(
+ f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) "
+ f"needs to be a round multiple of the number of processes ({num_processes})."
+ )
+ self.dataset = dataset
+ self.batch_size = batch_size
+ self.drop_last = drop_last
+ self.num_processes = num_processes
+ self.process_index = process_index
+ self.split_batches = split_batches
+
+ def set_epoch(self, epoch):
+ self.epoch = epoch
+ if hasattr(self.dataset, "set_epoch"):
+ self.dataset.set_epoch(epoch)
+
+ def __len__(self):
+ # We will just raise the downstream error if the underlying dataset is not sized
+ if self.drop_last:
+ return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
+ else:
+ return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
+
+ def __iter__(self):
+ if (
+ not hasattr(self.dataset, "set_epoch")
+ and hasattr(self.dataset, "generator")
+ and isinstance(self.dataset.generator, torch.Generator)
+ ):
+ self.dataset.generator.manual_seed(self.epoch)
+ real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
+ process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
+ process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
+
+ first_batch = None
+ current_batch = []
+ for element in self.dataset:
+ current_batch.append(element)
+ # Wait to have a full batch before yielding elements.
+ if len(current_batch) == real_batch_size:
+ for i in process_slice:
+ yield current_batch[i]
+ if first_batch is None:
+ first_batch = current_batch.copy()
+ current_batch = []
+
+ # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
+ if not self.drop_last and len(current_batch) > 0:
+ if first_batch is None:
+ first_batch = current_batch.copy()
+ while len(current_batch) < real_batch_size:
+ current_batch += first_batch
+ for i in process_slice:
+ yield current_batch[i]
+
+
+class DataLoaderStateMixin:
+ """
+ Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the
+ end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other
+ useful information that might be needed.
+
+ **Available attributes:**
+
+ - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch
+ - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
+ batch size
+
+ """
+
+ def __init_subclass__(cls, **kwargs):
+ cls.end_of_dataloader = False
+ cls.remainder = -1
+
+ def reset(self):
+ self.end_of_dataloader = False
+ self.remainder = -1
+
+ def begin(self):
+ "Prepares the gradient state for the current dataloader"
+ self.reset()
+ with suppress(Exception):
+ if not self._drop_last:
+ length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
+ self.remainder = length % self.total_batch_size
+ self.gradient_state._add_dataloader(self)
+
+ def end(self):
+ "Cleans up the gradient state after exiting the dataloader"
+ self.gradient_state._remove_dataloader(self)
+
+
+class DataLoaderShard(DataLoader, DataLoaderStateMixin):
+ """
+ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
+
+ Args:
+ dataset (`torch.utils.data.dataset.Dataset`):
+ The dataset to use to build this dataloader.
+ device (`torch.device`, *optional*):
+ If passed, the device to put all batches on.
+ rng_types (list of `str` or [`~utils.RNGType`]):
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
+ several of:
+
+ - `"torch"`: the base torch random number generator
+ - `"cuda"`: the CUDA random number generator (GPU only)
+ - `"xla"`: the XLA random number generator (TPU only)
+ - `"generator"`: an optional `torch.Generator`
+ synchronized_generator (`torch.Generator`, *optional*):
+ A random number generator to keep synchronized across processes.
+ skip_batches (`int`, *optional*, defaults to 0):
+ The number of batches to skip at the beginning.
+ **kwargs (additional keyword arguments, *optional*):
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
+
+ **Available attributes:**
+
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+ number of processes
+
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
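+
+ Example (a minimal single-process sketch, so no distributed setup is required; real usage goes through
+ [`prepare_data_loader`] or `Accelerator.prepare`):
+
+ ```python
+ >>> import torch
+ >>> from torch.utils.data import TensorDataset
+ >>> loader = DataLoaderShard(TensorDataset(torch.arange(6)), batch_size=2, skip_batches=1)
+ >>> [batch[0].tolist() for batch in loader]
+ [[2, 3], [4, 5]]
+ ```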
+ """
+
+ def __init__(
+ self,
+ dataset,
+ device=None,
+ rng_types=None,
+ synchronized_generator=None,
+ skip_batches=0,
+ _drop_last: bool = False,
+ **kwargs,
+ ):
+ super().__init__(dataset, **kwargs)
+ self.device = device
+ self.rng_types = rng_types
+ self.synchronized_generator = synchronized_generator
+ self.skip_batches = skip_batches
+ self.gradient_state = GradientState()
+ self._drop_last = _drop_last
+ self.iteration = 0
+
+ def __iter__(self):
+ if self.rng_types is not None:
+ synchronize_rng_states(self.rng_types, self.synchronized_generator)
+ self.begin()
+
+ self.set_epoch(self.iteration)
+ dataloader_iter = super().__iter__()
+ # We iterate one batch ahead to check when we are at the end
+ try:
+ current_batch = next(dataloader_iter)
+ except StopIteration:
+ yield
+
+ batch_index = 0
+ while True:
+ try:
+ # But we still move it to the device so it is done before `StopIteration` is reached
+ if self.device is not None:
+ current_batch = send_to_device(current_batch, self.device)
+ next_batch = next(dataloader_iter)
+ if batch_index >= self.skip_batches:
+ yield current_batch
+ batch_index += 1
+ current_batch = next_batch
+ except StopIteration:
+ self.end_of_dataloader = True
+ if batch_index >= self.skip_batches:
+ yield current_batch
+ break
+
+ self.iteration += 1
+ self.end()
+
+ def set_epoch(self, epoch: int):
+ # In case it is manually passed in, the user can set it to what they like
+ if self.iteration != epoch:
+ self.iteration = epoch
+ if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
+ self.batch_sampler.sampler.set_epoch(epoch)
+ # We support if a custom `Dataset` implementation has `set_epoch`
+ # or in general HF datasets `Datasets`
+ elif hasattr(self.dataset, "set_epoch"):
+ self.dataset.set_epoch(epoch)
+
+ @property
+ def total_batch_size(self):
+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
+ return (
+ batch_sampler.batch_size
+ if getattr(batch_sampler, "split_batches", False)
+ else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
+ )
+
+ @property
+ def total_dataset_length(self):
+ if hasattr(self.dataset, "total_length"):
+ return self.dataset.total_length
+ else:
+ return len(self.dataset)
+
+
+if is_torch_xla_available():
+ import torch_xla.distributed.parallel_loader as xpl
+
+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
+ """
+ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.
+
+ XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to
+ prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main
+ thread only.
+
+ **Available attributes:**
+
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+ number of processes
+
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
+ """
+
+ def __init__(self, dataloader: DataLoaderShard, device: torch.device):
+ super().__init__(dataloader, device)
+ self._rng_types = self._loader.rng_types
+ self._loader.rng_types = None
+
+ def __iter__(self):
+ if self._rng_types is not None:
+ synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
+
+ return super().__iter__()
+
+ @property
+ def total_batch_size(self):
+ return self._loader.total_batch_size
+
+ @property
+ def total_dataset_length(self):
+ return self._loader.total_dataset_length
+
+ @property
+ def batch_sampler(self):
+ return self._loader.batch_sampler
+
+
+class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
+ """
+ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
+ process their part of the batch.
+
+ Args:
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
+ `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be
+ the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial
+ `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch
+ size of the `dataloader` is a round multiple of the number of processes.
+ skip_batches (`int`, *optional*, defaults to 0):
+ The number of batches to skip at the beginning of an iteration.
+
+ **Available attributes:**
+
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+ number of processes
+
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
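+
+ Example (a rough sketch; this class is normally built for you by [`prepare_data_loader`] with
+ `dispatch_batches=True`, and it only does something useful inside a program launched with `accelerate launch`):
+
+ ```python
+ import torch
+ from torch.utils.data import DataLoader, TensorDataset
+
+ base = DataLoader(TensorDataset(torch.arange(64)), batch_size=8)
+ dispatched = prepare_data_loader(base, put_on_device=True, dispatch_batches=True)
+ for batch in dispatched:
+     ...  # process 0 fetches and preprocesses the full batch, every process receives its slice
+ ```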
+ """
+
+ def __init__(
+ self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs
+ ):
+ shuffle = False
+ if is_torch_version(">=", "1.11.0"):
+ from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
+
+ # We need to save the shuffling state of the DataPipe
+ if isinstance(dataset, ShufflerIterDataPipe):
+ shuffle = dataset._shuffle_enabled
+ super().__init__(dataset, **kwargs)
+ self.split_batches = split_batches
+ if shuffle:
+ torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
+
+ self.gradient_state = GradientState()
+ self.state = AcceleratorState()
+ self._drop_last = _drop_last
+ self.skip_batches = skip_batches
+
+ self.slice_fn = slice_tensors if slice_fn is None else slice_fn
+ self.iteration = 0
+
+ def _fetch_batches(self, iterator):
+ batches, batch = None, None
+ # On process 0, we gather the batch to dispatch.
+ if self.state.process_index == 0:
+ try:
+ if self.split_batches:
+ # One batch of the main iterator is dispatched and split.
+ batch = next(iterator)
+ else:
+ # num_processes batches of the main iterator are concatenated then dispatched and split.
+ # We add the batches one by one so we have the remainder available when drop_last=False.
+ batches = []
+ for _ in range(self.state.num_processes):
+ batches.append(next(iterator))
+ try:
+ batch = concatenate(batches, dim=0)
+ except RuntimeError as e:
+ raise RuntimeError(
+ "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`."
+ "either pass `dispatch_batches=False` and have each process fetch its own batch "
+ " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and "
+ "slice it into `num_processes` batches for each process."
+ ) from e
+ # In both cases, we need to get the structure of the batch that we will broadcast on other
+ # processes to initialize the tensors with the right shape.
+ # data_structure, stop_iteration
+ batch_info = [get_data_structure(batch), False]
+ except StopIteration:
+ batch_info = [None, True]
+ else:
+ batch_info = [None, self._stop_iteration]
+ # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.
+ broadcast_object_list(batch_info)
+ self._stop_iteration = batch_info[1]
+ if self._stop_iteration:
+ # If drop_last is False and split_batches is False, we may have a remainder to take care of.
+ if not self.split_batches and not self._drop_last:
+ if self.state.process_index == 0 and len(batches) > 0:
+ batch = concatenate(batches, dim=0)
+ batch_info = [get_data_structure(batch), False]
+ else:
+ batch_info = [None, True]
+ broadcast_object_list(batch_info)
+ return batch, batch_info
+
+ def __iter__(self):
+ self.begin()
+ self.set_epoch(self.iteration)
+ main_iterator = None
+ if is_torch_version(">=", "2.0.1"):
+ # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts
+ # shared seed to all dist processes. Thus, we need to create iterator for all dist processes.
+ # But, we only iterate through the DataLoader on process 0.
+ main_iterator = super().__iter__()
+ elif self.state.process_index == 0:
+ main_iterator = super().__iter__()
+ stop_iteration = False
+ self._stop_iteration = False
+ first_batch = None
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
+ batch_index = 0
+ while not stop_iteration:
+ batch, batch_info = next_batch, next_batch_info
+
+ if self.state.process_index != 0:
+ # Initialize tensors on other processes than process 0.
+ batch = initialize_tensors(batch_info[0])
+ batch = send_to_device(batch, self.state.device)
+ # Broadcast the batch before splitting it.
+ batch = broadcast(batch, from_process=0)
+
+ if not self._drop_last and first_batch is None:
+ # We keep at least num processes elements of the first batch to be able to complete the last batch
+ first_batch = self.slice_fn(
+ batch,
+ slice(0, self.state.num_processes),
+ process_index=self.state.process_index,
+ num_processes=self.state.num_processes,
+ )
+
+ if batch is None:
+ raise ValueError(
+ f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration."
+ )
+
+ observed_batch_size = find_batch_size(batch)
+ batch_size = observed_batch_size // self.state.num_processes
+
+ stop_iteration = self._stop_iteration
+ if not stop_iteration:
+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in
+ # the dataloader since the number of batches is a round multiple of the number of processes.
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.
+ if self._stop_iteration and next_batch_info[0] is None:
+ stop_iteration = True
+
+ if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:
+ # If the last batch is not complete, let's add the first batch to it.
+ batch = concatenate([batch, first_batch], dim=0)
+ # Batch size computation above is wrong, it's off by 1 so we fix it.
+ batch_size += 1
+
+ data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)
+ batch = self.slice_fn(
+ batch,
+ data_slice,
+ process_index=self.state.process_index,
+ num_processes=self.state.num_processes,
+ )
+
+ if stop_iteration:
+ self.end_of_dataloader = True
+ self.remainder = observed_batch_size
+ if batch_index >= self.skip_batches:
+ yield batch
+ batch_index += 1
+ self.iteration += 1
+ self.end()
+
+ def set_epoch(self, epoch: int):
+ # In case it is manually passed in, the user can set it to what they like
+ if self.iteration != epoch:
+ self.iteration = epoch
+ if hasattr(self.batch_sampler.sampler, "set_epoch"):
+ self.batch_sampler.sampler.set_epoch(epoch)
+ elif hasattr(self.dataset, "set_epoch"):
+ self.dataset.set_epoch(epoch)
+
+ def __len__(self):
+ whole_length = super().__len__()
+ if self.split_batches:
+ return whole_length
+ elif self._drop_last:
+ return whole_length // self.state.num_processes
+ else:
+ return math.ceil(whole_length / self.state.num_processes)
+
+ @property
+ def total_batch_size(self):
+ return (
+ self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)
+ )
+
+ @property
+ def total_dataset_length(self):
+ return len(self.dataset)
+
+
+def prepare_data_loader(
+ dataloader: DataLoader,
+ device: Optional[torch.device] = None,
+ num_processes: Optional[int] = None,
+ process_index: Optional[int] = None,
+ split_batches: bool = False,
+ put_on_device: bool = False,
+ rng_types: Optional[List[Union[str, RNGType]]] = None,
+ dispatch_batches: Optional[bool] = None,
+ even_batches: bool = True,
+ slice_fn_for_dispatch: Optional[Callable] = None,
+ use_seedable_sampler: bool = False,
+) -> DataLoader:
+ """
+ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
+
+ Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
+
+ Args:
+ dataloader (`torch.utils.data.dataloader.DataLoader`):
+ The data loader to split across several devices.
+ device (`torch.device`):
+ The target device for the returned `DataLoader`.
+ num_processes (`int`, *optional*):
+ The number of processes running concurrently. Will default to the value given by
+ [`~state.AcceleratorState`].
+ process_index (`int`, *optional*):
+ The index of the current process. Will default to the value given by [`~state.AcceleratorState`].
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
+ `num_processes` batches at each iteration).
+
+ Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
+ this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
+ otherwise.
+
+ Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
+ the number of processes.
+ put_on_device (`bool`, *optional*, defaults to `False`):
+ Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or
+ dictionaries of tensors).
+ rng_types (list of `str` or [`~utils.RNGType`]):
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
+ several of:
+
+ - `"torch"`: the base torch random number generator
+ - `"cuda"`: the CUDA random number generator (GPU only)
+ - `"xla"`: the XLA random number generator (TPU only)
+ - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+ dispatch_batches (`bool`, *optional*):
+ If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches
+ are split and broadcast to each process. Will default to `True` when the underlying dataset is an
+ `IterableDataset`, `False` otherwise.
+ even_batches (`bool`, *optional*, defaults to `True`):
+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the
+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
+ all workers.
+ slice_fn_for_dispatch (`Callable`, *optional*):
+ If passed, this function will be used to slice tensors across `num_processes`. Will default to
+ [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be
+ ignored otherwise.
+ use_seedable_sampler (`bool`, *optional*, defaults to `False`):
+ Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better
+ reproducibility. Comes at a cost of potentially different performance due to the different shuffling
+ algorithm, but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
+ `self.set_epoch` call.
+
+ Returns:
+ `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches assigned to the current process.
+
+
+
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
+ equal to `False`.
+
+
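+ Example (a minimal sketch, assuming the script was launched with `accelerate launch` so that
+ [`~state.AcceleratorState`] is already set up; the toy dataset is only for illustration):
+
+ ```python
+ import torch
+ from torch.utils.data import DataLoader, TensorDataset
+
+ state = AcceleratorState()
+ dataloader = DataLoader(TensorDataset(torch.arange(64)), batch_size=8)
+ sharded = prepare_data_loader(
+     dataloader,
+     device=state.device,
+     num_processes=state.num_processes,
+     process_index=state.process_index,
+     put_on_device=True,
+ )
+ for batch in sharded:
+     ...  # each process now sees only its own portion of every batch
+ ```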
+ """
+ if dispatch_batches is None:
+ if not put_on_device:
+ dispatch_batches = False
+ else:
+ dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
+
+ if dispatch_batches and not put_on_device:
+ raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
+ # Grab defaults from AcceleratorState
+ state = AcceleratorState()
+ if num_processes is None:
+ num_processes = state.num_processes
+ if process_index is None:
+ process_index = state.process_index
+
+ # Sanity check
+ if split_batches:
+ if dataloader.batch_size is not None:
+ batch_size_for_check = dataloader.batch_size
+ else:
+ # For custom batch_sampler
+ if hasattr(dataloader.batch_sampler, "batch_size"):
+ batch_size_for_check = dataloader.batch_sampler.batch_size
+ else:
+ raise ValueError(
+ "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed "
+ "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. "
+ "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` "
+ f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set."
+ )
+
+ if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:
+ raise ValueError(
+ f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
+ f"needs to be a round multiple of the number of processes ({num_processes})."
+ )
+
+ new_dataset = dataloader.dataset
+ # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
+ new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
+ sampler_is_batch_sampler = False
+ synchronized_generator = None
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
+ if sampler_is_batch_sampler:
+ sampler = getattr(dataloader.sampler, "sampler", None)
+ else:
+ sampler = getattr(dataloader.batch_sampler, "sampler", None)
+ if isinstance(sampler, RandomSampler) and use_seedable_sampler:
+ # When iterating through the dataloader during distributed processes
+ # we want to ensure that on each process we are iterating through the same
+ # samples in the same order if a seed is set. This requires a tweak
+ # to the `torch.utils.data.RandomSampler` class (if used).
+ sampler = SeedableRandomSampler(
+ data_source=sampler.data_source,
+ replacement=sampler.replacement,
+ num_samples=sampler._num_samples,
+ generator=getattr(sampler, "generator", torch.Generator()),
+ )
+
+ if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
+ # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
+ generator = torch.Generator().manual_seed(42)
+ dataloader.generator = generator
+ dataloader.sampler.generator = generator
+ # No change if no multiprocess
+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
+ if isinstance(new_dataset, IterableDataset):
+ if getattr(dataloader.dataset, "generator", None) is not None:
+ synchronized_generator = dataloader.dataset.generator
+ new_dataset = IterableDatasetShard(
+ new_dataset,
+ batch_size=dataloader.batch_size,
+ drop_last=dataloader.drop_last,
+ num_processes=num_processes,
+ process_index=process_index,
+ split_batches=split_batches,
+ )
+ else:
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
+ new_batch_sampler = BatchSamplerShard(
+ batch_sampler,
+ num_processes=num_processes,
+ process_index=process_index,
+ split_batches=split_batches,
+ even_batches=even_batches,
+ )
+
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
+ ignore_kwargs = [
+ "batch_size",
+ "shuffle",
+ "sampler",
+ "batch_sampler",
+ "drop_last",
+ ]
+
+ if rng_types is not None and synchronized_generator is None and "generator" in rng_types:
+ rng_types.remove("generator")
+
+ kwargs = {
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
+ for k in _PYTORCH_DATALOADER_KWARGS
+ if k not in ignore_kwargs
+ }
+
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
+ if new_batch_sampler is None:
+ kwargs["drop_last"] = dataloader.drop_last
+ kwargs["batch_size"] = (
+ dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
+ )
+ if dispatch_batches:
+ kwargs.pop("generator")
+ dataloader = DataLoaderDispatcher(
+ new_dataset,
+ split_batches=split_batches,
+ batch_sampler=new_batch_sampler,
+ _drop_last=dataloader.drop_last,
+ slice_fn=slice_fn_for_dispatch,
+ **kwargs,
+ )
+ elif sampler_is_batch_sampler:
+ dataloader = DataLoaderShard(
+ new_dataset,
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
+ sampler=new_batch_sampler,
+ batch_size=dataloader.batch_size,
+ rng_types=rng_types,
+ _drop_last=dataloader.drop_last,
+ synchronized_generator=synchronized_generator,
+ **kwargs,
+ )
+ else:
+ dataloader = DataLoaderShard(
+ new_dataset,
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
+ batch_sampler=new_batch_sampler,
+ rng_types=rng_types,
+ synchronized_generator=synchronized_generator,
+ _drop_last=dataloader.drop_last,
+ **kwargs,
+ )
+
+ if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
+ if sampler_is_batch_sampler:
+ dataloader.sampler.sampler = sampler
+ else:
+ dataloader.batch_sampler.sampler = sampler
+ if hasattr(dataloader.batch_sampler, "batch_sampler"):
+ dataloader.batch_sampler.batch_sampler.sampler = sampler
+ if state.distributed_type == DistributedType.XLA:
+ return MpDeviceLoaderWrapper(dataloader, device)
+ return dataloader
+
+
+class SkipBatchSampler(BatchSampler):
+ """
+ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
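+
+ Example (illustrative):
+
+ ```python
+ >>> from torch.utils.data import BatchSampler, SequentialSampler
+ >>> base = BatchSampler(SequentialSampler(range(8)), batch_size=2, drop_last=False)
+ >>> list(SkipBatchSampler(base, skip_batches=2))
+ [[4, 5], [6, 7]]
+ ```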
+ """
+
+ def __init__(self, batch_sampler, skip_batches=0):
+ self.batch_sampler = batch_sampler
+ self.skip_batches = skip_batches
+
+ def __iter__(self):
+ for index, samples in enumerate(self.batch_sampler):
+ if index >= self.skip_batches:
+ yield samples
+
+ @property
+ def total_length(self):
+ return len(self.batch_sampler)
+
+ def __len__(self):
+ return len(self.batch_sampler) - self.skip_batches
+
+
+class SkipDataLoader(DataLoader):
+ """
+ Subclass of a PyTorch `DataLoader` that will skip the first batches.
+
+ Args:
+ dataset (`torch.utils.data.dataset.Dataset`):
+ The dataset to use to build this dataloader.
+ skip_batches (`int`, *optional*, defaults to 0):
+ The number of batches to skip at the beginning.
+ kwargs:
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
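+
+ Example (an illustrative sketch):
+
+ ```python
+ >>> import torch
+ >>> from torch.utils.data import TensorDataset
+ >>> loader = SkipDataLoader(TensorDataset(torch.arange(8)), skip_batches=2, batch_size=2)
+ >>> [batch[0].tolist() for batch in loader]
+ [[4, 5], [6, 7]]
+ ```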
+ """
+
+ def __init__(self, dataset, skip_batches=0, **kwargs):
+ super().__init__(dataset, **kwargs)
+ self.skip_batches = skip_batches
+
+ def __iter__(self):
+ for index, batch in enumerate(super().__iter__()):
+ if index >= self.skip_batches:
+ yield batch
+
+
+def skip_first_batches(dataloader, num_batches=0):
+ """
+ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
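+
+ Example (an illustrative sketch with a plain `DataLoader`; prepared `DataLoaderShard`/`DataLoaderDispatcher`
+ instances are handled by the dedicated branches in the function body):
+
+ ```python
+ >>> import torch
+ >>> from torch.utils.data import DataLoader, TensorDataset
+ >>> dataloader = DataLoader(TensorDataset(torch.arange(6)), batch_size=2)
+ >>> [batch[0].tolist() for batch in skip_first_batches(dataloader, num_batches=1)]
+ [[2, 3], [4, 5]]
+ ```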
+ """
+ dataset = dataloader.dataset
+ sampler_is_batch_sampler = False
+ if isinstance(dataset, IterableDataset):
+ new_batch_sampler = None
+ else:
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
+ new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
+
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
+ ignore_kwargs = [
+ "batch_size",
+ "shuffle",
+ "sampler",
+ "batch_sampler",
+ "drop_last",
+ ]
+
+ kwargs = {
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
+ for k in _PYTORCH_DATALOADER_KWARGS
+ if k not in ignore_kwargs
+ }
+
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
+ if new_batch_sampler is None:
+ kwargs["drop_last"] = dataloader.drop_last
+ kwargs["batch_size"] = dataloader.batch_size
+
+ if isinstance(dataloader, DataLoaderDispatcher):
+ if new_batch_sampler is None:
+ # Need to manually skip batches in the dataloader
+ kwargs["skip_batches"] = num_batches
+ dataloader = DataLoaderDispatcher(
+ dataset,
+ split_batches=dataloader.split_batches,
+ batch_sampler=new_batch_sampler,
+ _drop_last=dataloader._drop_last,
+ **kwargs,
+ )
+ elif isinstance(dataloader, DataLoaderShard):
+ if new_batch_sampler is None:
+ # Need to manually skip batches in the dataloader
+ kwargs["skip_batches"] = num_batches
+ elif sampler_is_batch_sampler:
+ kwargs["sampler"] = new_batch_sampler
+ kwargs["batch_size"] = dataloader.batch_size
+ else:
+ kwargs["batch_sampler"] = new_batch_sampler
+ dataloader = DataLoaderShard(
+ dataset,
+ device=dataloader.device,
+ rng_types=dataloader.rng_types,
+ synchronized_generator=dataloader.synchronized_generator,
+ **kwargs,
+ )
+ else:
+ if new_batch_sampler is None:
+ # Need to manually skip batches in the dataloader
+ dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
+ else:
+ dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
+
+ return dataloader
diff --git a/venv/lib/python3.10/site-packages/accelerate/hooks.py b/venv/lib/python3.10/site-packages/accelerate/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9a4b384f3cac39e7bedabb1f5e7c0320aae6a7f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/hooks.py
@@ -0,0 +1,709 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+from typing import Dict, List, Mapping, Optional, Union
+
+import torch
+import torch.nn as nn
+
+from .state import PartialState
+from .utils import (
+ PrefixedDataset,
+ find_device,
+ named_module_tensors,
+ send_to_device,
+ set_module_tensor_to_device,
+)
+from .utils.modeling import get_non_persistent_buffers
+from .utils.other import recursive_getattr
+
+
+class ModelHook:
+ """
+ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
+ from PyTorch's existing hooks is that these also get passed the kwargs.
+
+ Class attribute:
+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
+ the `torch.no_grad()` context manager.
+ """
+
+ no_grad = False
+
+ def init_hook(self, module):
+ """
+ To be executed when the hook is attached to the module.
+
+ Args:
+ module (`torch.nn.Module`): The module attached to this hook.
+ """
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ """
+ To be executed just before the forward method of the model.
+
+ Args:
+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
+ args (`Tuple[Any]`): The positional arguments passed to the module.
+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
+
+ Returns:
+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
+ """
+ return args, kwargs
+
+ def post_forward(self, module, output):
+ """
+ To be executed just after the forward method of the model.
+
+ Args:
+ module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.
+ output (`Any`): The output of the module.
+
+ Returns:
+ `Any`: The processed `output`.
+ """
+ return output
+
+ def detach_hook(self, module):
+ """
+ To be executed when the hook is detached from a module.
+
+ Args:
+ module (`torch.nn.Module`): The module detached from this hook.
+ """
+ return module
+
+
+class SequentialHook(ModelHook):
+ """
+ A hook that can contain several hooks and iterates through them at each event.
+ """
+
+ def __init__(self, *hooks):
+ self.hooks = hooks
+
+ def init_hook(self, module):
+ for hook in self.hooks:
+ module = hook.init_hook(module)
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ for hook in self.hooks:
+ args, kwargs = hook.pre_forward(module, *args, **kwargs)
+ return args, kwargs
+
+ def post_forward(self, module, output):
+ for hook in self.hooks:
+ output = hook.post_forward(module, output)
+ return output
+
+ def detach_hook(self, module):
+ for hook in self.hooks:
+ module = hook.detach_hook(module)
+ return module
+
+
+def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
+ """
+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove
+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.
+
+
+
+ If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
+ together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
+
+
+
+ Args:
+ module (`torch.nn.Module`):
+ The module to attach a hook to.
+ hook (`ModelHook`):
+ The hook to attach.
+ append (`bool`, *optional*, defaults to `False`):
+ Whether the hook should be chained with an existing one (if module already contains a hook) or not.
+
+ Returns:
+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
+ be discarded).
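+
+ Example (a minimal sketch with a hypothetical `CountingHook`, only meant to show how the hook callbacks wrap the
+ module's `forward`):
+
+ ```python
+ >>> import torch
+ >>> import torch.nn as nn
+ >>> class CountingHook(ModelHook):
+ ...     def init_hook(self, module):
+ ...         self.calls = 0
+ ...         return module
+ ...     def pre_forward(self, module, *args, **kwargs):
+ ...         self.calls += 1
+ ...         return args, kwargs
+ >>> layer = add_hook_to_module(nn.Linear(4, 4), CountingHook())
+ >>> _ = layer(torch.randn(2, 4))
+ >>> layer._hf_hook.calls
+ 1
+ ```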
+ """
+
+ if append and (getattr(module, "_hf_hook", None) is not None):
+ old_hook = module._hf_hook
+ remove_hook_from_module(module)
+ hook = SequentialHook(old_hook, hook)
+
+ if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
+ # If we already put some hook on this module, we replace it with the new one.
+ old_forward = module._old_forward
+ else:
+ old_forward = module.forward
+ module._old_forward = old_forward
+
+ module = hook.init_hook(module)
+ module._hf_hook = hook
+
+ def new_forward(module, *args, **kwargs):
+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
+ if module._hf_hook.no_grad:
+ with torch.no_grad():
+ output = module._old_forward(*args, **kwargs)
+ else:
+ output = module._old_forward(*args, **kwargs)
+ return module._hf_hook.post_forward(module, output)
+
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
+ if "GraphModuleImpl" in str(type(module)):
+ module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
+ else:
+ module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
+
+ return module
+
+
+def remove_hook_from_module(module: nn.Module, recurse=False):
+ """
+ Removes any hook attached to a module via `add_hook_to_module`.
+
+ Args:
+ module (`torch.nn.Module`): The module from which to remove the hook.
+ recurse (`bool`, *optional*): Whether to remove the hooks recursively.
+
+ Returns:
+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
+ be discarded).
+ """
+
+ if hasattr(module, "_hf_hook"):
+ module._hf_hook.detach_hook(module)
+ delattr(module, "_hf_hook")
+
+ if hasattr(module, "_old_forward"):
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
+ if "GraphModuleImpl" in str(type(module)):
+ module.__class__.forward = module._old_forward
+ else:
+ module.forward = module._old_forward
+ delattr(module, "_old_forward")
+
+ if recurse:
+ for child in module.children():
+ remove_hook_from_module(child, recurse)
+
+ return module
+
+
+class AlignDevicesHook(ModelHook):
+ """
+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
+ associated module, potentially offloading the weights after the forward pass.
+
+ Args:
+ execution_device (`torch.device`, *optional*):
+ The device on which inputs and model weights should be placed before the forward pass.
+ offload (`bool`, *optional*, defaults to `False`):
+ Whether or not the weights should be offloaded after the forward pass.
+ io_same_device (`bool`, *optional*, defaults to `False`):
+ Whether or not the output should be placed on the same device as the input was.
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the associated module's buffers when offloading.
+ place_submodules (`bool`, *optional*, defaults to `False`):
+ Whether to place the submodules on `execution_device` during the `init_hook` event.
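+
+ Example (a minimal CPU-only sketch; in practice `execution_device` is typically a GPU and the hook is attached
+ through the `attach_*` helpers below rather than manually):
+
+ ```python
+ >>> import torch
+ >>> import torch.nn as nn
+ >>> model = nn.Linear(4, 4)
+ >>> _ = add_hook_to_module(model, AlignDevicesHook(execution_device="cpu", io_same_device=True))
+ >>> model(torch.randn(2, 4)).device
+ device(type='cpu')
+ ```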
+ """
+
+ def __init__(
+ self,
+ execution_device: Optional[Union[int, str, torch.device]] = None,
+ offload: bool = False,
+ io_same_device: bool = False,
+ weights_map: Optional[Mapping] = None,
+ offload_buffers: bool = False,
+ place_submodules: bool = False,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+ ):
+ self.execution_device = execution_device
+ self.offload = offload
+ self.io_same_device = io_same_device
+ self.weights_map = weights_map
+ self.offload_buffers = offload_buffers
+ self.place_submodules = place_submodules
+ self.skip_keys = skip_keys
+
+ # Will contain the input device when `io_same_device=True`.
+ self.input_device = None
+ self.param_original_devices = {}
+ self.buffer_original_devices = {}
+ self.tied_params_names = set()
+
+ # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory
+ # for tied weights already loaded on the target execution device.
+ self.tied_params_map = tied_params_map
+
+ def __repr__(self):
+ return (
+ f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
+ f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
+ f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
+ )
+
+ def init_hook(self, module):
+ # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero.
+ if self.execution_device == "meta" or self.execution_device == torch.device("meta"):
+ self.tied_params_map = None
+
+ if not self.offload and self.execution_device is not None:
+ for name, _ in named_module_tensors(module, recurse=self.place_submodules):
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
+ elif self.offload:
+ self.original_devices = {
+ name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)
+ }
+ if self.weights_map is None:
+ self.weights_map = {
+ name: param.to("cpu")
+ for name, param in named_module_tensors(
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules
+ )
+ }
+ for name, _ in named_module_tensors(
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True
+ ):
+ # When using disk offloading, we cannot rely on `weights_map[name].data_ptr()` as the reference pointer,
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
+ # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str]
+ # to add on the fly pointers to `tied_params_map` in the pre_forward call.
+ if (
+ self.tied_params_map is not None
+ and recursive_getattr(module, name).data_ptr() in self.tied_params_map
+ ):
+ self.tied_params_names.add(name)
+
+ set_module_tensor_to_device(module, name, "meta")
+
+ if not self.offload_buffers and self.execution_device is not None:
+ for name, _ in module.named_buffers(recurse=self.place_submodules):
+ set_module_tensor_to_device(
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
+ )
+ elif self.offload_buffers and self.execution_device is not None:
+ for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
+ set_module_tensor_to_device(
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
+ )
+
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ if self.io_same_device:
+ self.input_device = find_device([args, kwargs])
+ if self.offload:
+ self.tied_pointers_to_remove = set()
+
+ for name, _ in named_module_tensors(
+ module,
+ include_buffers=self.offload_buffers,
+ recurse=self.place_submodules,
+ remove_non_persistent=True,
+ ):
+ fp16_statistics = None
+ value = self.weights_map[name]
+ if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
+ if value.dtype == torch.int8:
+ fp16_statistics = self.weights_map[name.replace("weight", "SCB")]
+
+ # In case we are using offloading with tied weights, we need to keep track of the offloaded weights
+ # that are loaded on device at this point, as we will need to remove them as well from the dictionary
+ # self.tied_params_map in order to allow to free memory.
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map:
+ self.tied_params_map[value.data_ptr()] = {}
+
+ if (
+ value is not None
+ and self.tied_params_map is not None
+ and value.data_ptr() in self.tied_params_map
+ and self.execution_device not in self.tied_params_map[value.data_ptr()]
+ ):
+ self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device))
+
+ set_module_tensor_to_device(
+ module,
+ name,
+ self.execution_device,
+ value=value,
+ fp16_statistics=fp16_statistics,
+ tied_params_map=self.tied_params_map,
+ )
+
+ return send_to_device(args, self.execution_device), send_to_device(
+ kwargs, self.execution_device, skip_keys=self.skip_keys
+ )
+
+ def post_forward(self, module, output):
+ if self.offload:
+ for name, _ in named_module_tensors(
+ module,
+ include_buffers=self.offload_buffers,
+ recurse=self.place_submodules,
+ remove_non_persistent=True,
+ ):
+ set_module_tensor_to_device(module, name, "meta")
+ if type(module).__name__ == "Linear8bitLt":
+ module.state.SCB = None
+ module.state.CxB = None
+
+ # We may have loaded tied weights into self.tied_params_map (avoiding loading them several times in e.g. submodules): remove them from
+ # this dictionary to allow the garbage collector to do its job.
+ for value_pointer, device in self.tied_pointers_to_remove:
+ del self.tied_params_map[value_pointer][device]
+ self.tied_pointers_to_remove = set()
+
+ if self.io_same_device and self.input_device is not None:
+ output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
+
+ return output
+
+ def detach_hook(self, module):
+ if self.offload:
+ for name, device in self.original_devices.items():
+ if device != torch.device("meta"):
+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
+ return module
+
+
+def attach_execution_device_hook(
+ module: torch.nn.Module,
+ execution_device: Union[int, str, torch.device],
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+):
+ """
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
+ execution device.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module where we want to attach the hooks.
+ execution_device (`int`, `str` or `torch.device`):
+ The device on which inputs and model weights should be placed before the forward pass.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
+ instead of duplicating memory.
+ """
+ if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
+ add_hook_to_module(
+ module,
+ AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map),
+ )
+
+ # Break the recursion if we get to a preload module.
+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
+ return
+
+ for child in module.children():
+ attach_execution_device_hook(child, execution_device, tied_params_map=tied_params_map)
+
+
+def attach_align_device_hook(
+ module: torch.nn.Module,
+ execution_device: Optional[torch.device] = None,
+ offload: bool = False,
+ weights_map: Optional[Mapping] = None,
+ offload_buffers: bool = False,
+ module_name: str = "",
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+):
+ """
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
+ buffers.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module where we want to attach the hooks.
+ execution_device (`torch.device`, *optional*):
+ The device on which inputs and model weights should be placed before the forward pass.
+ offload (`bool`, *optional*, defaults to `False`):
+ Whether or not the weights should be offloaded after the forward pass.
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the associated module's buffers when offloading.
+ module_name (`str`, *optional*, defaults to `""`):
+ The name of the module.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
+ instead of duplicating memory.
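+
+ Example (a rough CPU-only sketch of weight offloading; real use would pass a GPU `execution_device` and usually
+ goes through `dispatch_model` rather than calling this helper directly):
+
+ ```python
+ >>> import torch
+ >>> import torch.nn as nn
+ >>> model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
+ >>> attach_align_device_hook(model, execution_device=torch.device("cpu"), offload=True)
+ >>> out = model(torch.randn(1, 4))  # weights are materialized for the forward, then put back on the meta device
+ ```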
+ """
+ # Attach the hook on this module if it has any direct tensor.
+ directs = named_module_tensors(module)
+ full_offload = (
+ offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
+ )
+
+ if len(list(directs)) > 0 or full_offload:
+ if weights_map is not None:
+ prefix = f"{module_name}." if len(module_name) > 0 else ""
+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)
+ else:
+ prefixed_weights_map = None
+ hook = AlignDevicesHook(
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=prefixed_weights_map,
+ offload_buffers=offload_buffers,
+ place_submodules=full_offload,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook, append=True)
+
+ # We stop the recursion in case we hit the full offload.
+ if full_offload:
+ return
+
+ # Recurse on all children of the module.
+ for child_name, child in module.named_children():
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
+ attach_align_device_hook(
+ child,
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=child_name,
+ preload_module_classes=preload_module_classes,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+
+
+def remove_hook_from_submodules(module: nn.Module):
+ """
+ Recursively removes all hooks attached on the submodules of a given model.
+
+ Args:
+ module (`torch.nn.Module`): The module on which to remove all hooks.
+ """
+ remove_hook_from_module(module)
+ for child in module.children():
+ remove_hook_from_submodules(child)
+
+
+def attach_align_device_hook_on_blocks(
+ module: nn.Module,
+ execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
+ offload: Union[bool, Dict[str, bool]] = False,
+ weights_map: Mapping = None,
+ offload_buffers: bool = False,
+ module_name: str = "",
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+):
+ """
+ Attaches `AlignDevicesHook` to all blocks of a given model as needed.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module where we want to attach the hooks.
+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):
+ The device on which inputs and model weights should be placed before the forward pass. It can be one device
+ for the whole module, or a dictionary mapping module name to device.
+ offload (`bool`, *optional*, defaults to `False`):
+ Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole
+ module, or a dictionary mapping module name to boolean.
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the associated module's buffers when offloading.
+ module_name (`str`, *optional*, defaults to `""`):
+ The name of the module.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
+ instead of duplicating memory.
+ """
+ # If one device and one offload, we've got one hook.
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):
+ if not offload:
+ hook = AlignDevicesHook(
+ execution_device=execution_device,
+ io_same_device=True,
+ skip_keys=skip_keys,
+ place_submodules=True,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+ else:
+ attach_align_device_hook(
+ module,
+ execution_device=execution_device,
+ offload=True,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=module_name,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ return
+
+ if not isinstance(execution_device, Mapping):
+ execution_device = {key: execution_device for key in offload.keys()}
+ if not isinstance(offload, Mapping):
+ offload = {key: offload for key in execution_device.keys()}
+
+ if module_name in execution_device and module_name in offload and not offload[module_name]:
+ hook = AlignDevicesHook(
+ execution_device=execution_device[module_name],
+ offload_buffers=offload_buffers,
+ io_same_device=(module_name == ""),
+ place_submodules=True,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+ attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map)
+ elif module_name in execution_device and module_name in offload:
+ attach_align_device_hook(
+ module,
+ execution_device=execution_device[module_name],
+ offload=True,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=module_name,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ tied_params_map=tied_params_map,
+ )
+ if not hasattr(module, "_hf_hook"):
+ hook = AlignDevicesHook(
+ execution_device=execution_device[module_name],
+ io_same_device=(module_name == ""),
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+ attach_execution_device_hook(
+ module,
+ execution_device[module_name],
+ preload_module_classes=preload_module_classes,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ elif module_name == "":
+ hook = AlignDevicesHook(
+ execution_device=execution_device.get(""),
+ io_same_device=True,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+
+ for child_name, child in module.named_children():
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
+ attach_align_device_hook_on_blocks(
+ child,
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=child_name,
+ preload_module_classes=preload_module_classes,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+
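+# A hedged usage sketch: the module name ("block1"), `model` and `weights_map` are
+# placeholders; any `Mapping` from parameter names to tensors can serve as the weights map.
+#
+#     execution_device = {"": torch.device("cuda", 0), "block1": torch.device("cuda", 0)}
+#     offload = {"": False, "block1": True}
+#     attach_align_device_hook_on_blocks(
+#         model,
+#         execution_device=execution_device,
+#         offload=offload,
+#         weights_map=weights_map,
+#         offload_buffers=False,
+#     )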
+
+class CpuOffload(ModelHook):
+ """
+    Offloads a model to the CPU until its forward pass is called. The model is not moved back to the CPU after the
+    forward pass; the user needs to call the `init_hook` method again for that.
+
+ Args:
+ execution_device(`str`, `int` or `torch.device`, *optional*):
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
+ GPU 0 if there is a GPU, and finally to the CPU.
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
+ The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
+ passed, its offload method will be called just before the forward of the model to which this hook is
+ attached.
+ """
+
+ def __init__(
+ self,
+ execution_device: Optional[Union[str, int, torch.device]] = None,
+ prev_module_hook: Optional["UserCpuOffloadHook"] = None,
+ ):
+ self.prev_module_hook = prev_module_hook
+
+ self.execution_device = execution_device if execution_device is not None else PartialState().default_device
+
+ def init_hook(self, module):
+ return module.to("cpu")
+
+ def pre_forward(self, module, *args, **kwargs):
+ if self.prev_module_hook is not None:
+ self.prev_module_hook.offload()
+ module.to(self.execution_device)
+ return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
+
+
+class UserCpuOffloadHook:
+ """
+    A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook or
+    remove it entirely.
+ """
+
+ def __init__(self, model, hook):
+ self.model = model
+ self.hook = hook
+
+ def offload(self):
+ self.hook.init_hook(self.model)
+
+ def remove(self):
+ remove_hook_from_module(self.model)
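+
+
+# A hedged sketch of chaining `CpuOffload` hooks across a two-model pipeline through
+# `cpu_offload_with_hook` (exposed from `accelerate.big_modeling`); `model_1`, `model_2`
+# and `inputs` are placeholders:
+#
+#     model_1, hook_1 = cpu_offload_with_hook(model_1, execution_device)
+#     model_2, hook_2 = cpu_offload_with_hook(model_2, execution_device, prev_module_hook=hook_1)
+#     out = model_2(model_1(inputs))  # model_1 is offloaded back to CPU before model_2 runs
+#     hook_2.offload()                # manually offload the last model once done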
diff --git a/venv/lib/python3.10/site-packages/accelerate/inference.py b/venv/lib/python3.10/site-packages/accelerate/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf4cf15017938e34867d4eeaad120745051ab385
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/inference.py
@@ -0,0 +1,188 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+from types import MethodType
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+from .state import PartialState
+from .utils import (
+ calculate_maximum_sizes,
+ convert_bytes,
+ copy_tensor_to_devices,
+ ignorant_find_batch_size,
+ infer_auto_device_map,
+ is_pippy_available,
+ pad_input_tensors,
+ send_to_device,
+)
+
+
+if is_pippy_available():
+ from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
+ from pippy.PipelineStage import PipelineStage
+
+
+def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
+ """
+ Calculates the device map for `model` with an offset for PiPPy
+ """
+ if num_processes == 1:
+ return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
+ if max_memory is None:
+ model_size, shared = calculate_maximum_sizes(model)
+
+ # Split into `n` chunks for each GPU
+ memory = (model_size + shared[0]) / num_processes
+ memory = convert_bytes(memory)
+ value, ending = memory.split(" ")
+
+ # Add a chunk to deal with potential extra shared memory instances
+ memory = math.ceil(float(value)) * 1.1
+ memory = f"{memory} {ending}"
+ max_memory = {i: memory for i in range(num_processes)}
+ device_map = infer_auto_device_map(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ clean_result=False,
+ )
+ return device_map
+
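+# A hedged example (the class name in `no_split_module_classes` is a placeholder; the
+# returned mapping depends entirely on the model):
+#
+#     device_map = generate_device_map(model, num_processes=2, no_split_module_classes=["DecoderLayer"])
+#     # -> e.g. {"embed_tokens": 0, "layers.0": 0, ..., "layers.12": 1, ..., "lm_head": 1}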
+
+def find_pippy_batch_size(args, kwargs):
+ found_batch_size = None
+ if args is not None:
+ for arg in args:
+ found_batch_size = ignorant_find_batch_size(arg)
+ if found_batch_size is not None:
+ break
+ if kwargs is not None and found_batch_size is None:
+ for kwarg in kwargs.values():
+ found_batch_size = ignorant_find_batch_size(kwarg)
+ if found_batch_size is not None:
+ break
+ return found_batch_size
+
+
+def build_pipeline(model, split_points, args, kwargs, num_chunks):
+ """
+    Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing
+    in the `args` and `kwargs` the model expects, placed on the CPU.
+
+    Users can pass in a custom `num_chunks` as an optional hyperparameter. By default it will use
+    `AcceleratorState.num_processes`.
+ """
+ # We need to annotate the split points in the model for PiPPy
+ state = PartialState()
+ annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points})
+ found_batch_size = find_pippy_batch_size(args, kwargs)
+ if found_batch_size != num_chunks:
+ if args is not None:
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
+ if kwargs is not None:
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
+ pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs)
+ stage = PipelineStage(pipe, state.local_process_index, device=state.device)
+
+ return stage
+
+
+def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
+ state = PartialState()
+ output = None
+
+ if state.num_processes == 1:
+ output = forward(*args, **kwargs)
+ elif state.is_local_main_process:
+ found_batch_size = find_pippy_batch_size(args, kwargs)
+ if found_batch_size is None:
+ raise ValueError("Could not find batch size from args or kwargs")
+ else:
+ if found_batch_size != num_chunks:
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
+ forward(*args, **kwargs)
+ elif state.is_last_process:
+ output = forward()
+ else:
+ forward()
+ if gather_output:
+ # Each node will get a copy of the full output which is only on the last GPU
+ output = copy_tensor_to_devices(output)
+ return output
+
+
+def prepare_pippy(
+ model,
+ split_points: Optional[Union[str, List[str]]] = "auto",
+ no_split_module_classes: Optional[List[str]] = None,
+ example_args: Optional[Tuple[Any]] = (),
+ example_kwargs: Optional[Dict[str, Any]] = None,
+ num_chunks: Optional[int] = None,
+ gather_output: Optional[bool] = False,
+):
+ """
+ Wraps `model` for pipeline parallel inference.
+
+ Args:
+ model (`torch.nn.Module`):
+ A model we want to split for pipeline-parallel inference
+ split_points (`str` or `List[str]`, defaults to 'auto'):
+ How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced
+ split given any model. Should be a list of layer names in the model to split by otherwise.
+ no_split_module_classes (`List[str]`):
+ A list of class names for layers we don't want to be split.
+ example_args (tuple of model inputs):
+            The expected inputs for the model that uses order-based inputs. Using positional inputs is recommended
+            when possible.
+        example_kwargs (dict of model inputs):
+ The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure
+ that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition
+ is true for all cases.
+ num_chunks (`int`, defaults to the number of available GPUs):
+ The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but
+ this can be tuned and played with. In general one should have num_chunks >= num_gpus.
+ gather_output (`bool`, defaults to `False`):
+ If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
+ """
+ if not is_pippy_available():
+ raise ImportError(
+ "`pippy` was not found to be installed on your system. Please "
+ "install using `pip install torchpippy` or ensure you have at least version 0.2.0"
+ )
+ state = PartialState()
+ example_args = send_to_device(example_args, "cpu")
+ example_kwargs = send_to_device(example_kwargs, "cpu")
+ if num_chunks is None:
+ num_chunks = state.num_processes
+ if split_points == "auto":
+ device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
+ split_points = []
+ for i in range(1, num_chunks):
+ split_points.append(next(k for k, v in device_map.items() if v == i))
+ model.hf_split_points = split_points
+ stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
+ model._original_forward = model.forward
+ model._original_call = model.__call__
+ model.pippy_stage = stage
+ model.hf_split_points = split_points
+
+ def forward(*args, **kwargs):
+ return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs)
+
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
+ # Note: creates an infinite recursion loop with `generate`
+ model_forward = MethodType(forward, model)
+ forward.__wrapped__ = model_forward
+ model.forward = forward
+ return model
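+
+
+# A hedged end-to-end sketch (`model` and `input_ids` are placeholders; meant to be run
+# with one process per GPU, e.g. under `accelerate launch`):
+#
+#     model = prepare_pippy(model, split_points="auto", example_args=(input_ids,))
+#     with torch.no_grad():
+#         output = model(input_ids)
+#     # `output` is only populated on the last rank unless gather_output=True was passed.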
diff --git a/venv/lib/python3.10/site-packages/accelerate/launchers.py b/venv/lib/python3.10/site-packages/accelerate/launchers.py
new file mode 100644
index 0000000000000000000000000000000000000000..0265b25187f813356cfb49768097d6cf2599b0d3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/launchers.py
@@ -0,0 +1,258 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import tempfile
+
+import torch
+
+from .state import AcceleratorState, PartialState
+from .utils import (
+ PrecisionType,
+ PrepareForLaunch,
+ are_libraries_initialized,
+ check_cuda_p2p_ib_support,
+ get_gpu_info,
+ is_mps_available,
+ patch_environment,
+)
+
+
+def test_launch():
+ "Verify a `PartialState` can be initialized."
+ _ = PartialState()
+
+
+def notebook_launcher(
+ function,
+ args=(),
+ num_processes=None,
+ mixed_precision="no",
+ use_port="29500",
+ master_addr="127.0.0.1",
+ node_rank=0,
+ num_nodes=1,
+):
+ """
+ Launches a training function, using several processes or multiple nodes if it's possible in the current environment
+ (TPU with multiple cores for instance).
+
+
+
+    To use this function, absolutely zero calls to a CUDA device may be made in the notebook session before calling it.
+    If any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability.
+
+ Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none
+ of those calls have been made.
+
+
+
+ Args:
+ function (`Callable`):
+ The training function to execute. If it accepts arguments, the first argument should be the index of the
+ process run.
+ args (`Tuple`):
+ Tuple of arguments to pass to the function (it will receive `*args`).
+ num_processes (`int`, *optional*):
+ The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to
+ the number of GPUs available otherwise.
+ mixed_precision (`str`, *optional*, defaults to `"no"`):
+ If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
+ use_port (`str`, *optional*, defaults to `"29500"`):
+ The port to use to communicate between processes when launching a multi-GPU training.
+ master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
+ The address to use for communication between processes.
+ node_rank (`int`, *optional*, defaults to 0):
+ The rank of the current node.
+ num_nodes (`int`, *optional*, defaults to 1):
+ The number of nodes to use for training.
+
+ Example:
+
+ ```python
+ # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
+ from accelerate import notebook_launcher
+
+
+ def train(*args):
+ # Your training function here
+ ...
+
+
+ notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
+ ```
+ """
+ # Are we in a google colab or a Kaggle Kernel?
+ in_colab = False
+ in_kaggle = False
+ if any(key.startswith("KAGGLE") for key in os.environ.keys()):
+ in_kaggle = True
+ elif "IPython" in sys.modules:
+ in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
+
+ try:
+ mixed_precision = PrecisionType(mixed_precision.lower())
+ except ValueError:
+ raise ValueError(
+            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
+ )
+
+ if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
+ # TPU launch
+ import torch_xla.distributed.xla_multiprocessing as xmp
+
+ if len(AcceleratorState._shared_state) > 0:
+ raise ValueError(
+ "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
+                "your training function. Restart your notebook and make sure no cells initialize an "
+ "`Accelerator`."
+ )
+ if num_processes is None:
+ num_processes = 8
+
+ launcher = PrepareForLaunch(function, distributed_type="TPU")
+ print(f"Launching a training on {num_processes} TPU cores.")
+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
+ elif in_colab and get_gpu_info()[1] < 2:
+ # No need for a distributed launch otherwise as it's either CPU or one GPU.
+ if torch.cuda.is_available():
+ print("Launching training on one GPU.")
+ else:
+ print("Launching training on one CPU.")
+ function(*args)
+ else:
+ if num_processes is None:
+ raise ValueError(
+ "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
+ )
+ if node_rank >= num_nodes:
+ raise ValueError("The node_rank must be less than the number of nodes.")
+ if num_processes > 1:
+ # Multi-GPU launch
+ from torch.multiprocessing import start_processes
+ from torch.multiprocessing.spawn import ProcessRaisedException
+
+ if len(AcceleratorState._shared_state) > 0:
+ raise ValueError(
+ "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
+                    "inside your training function. Restart your notebook and make sure no cells initialize an "
+ "`Accelerator`."
+ )
+ # Check for specific libraries known to initialize CUDA that users constantly use
+ problematic_imports = are_libraries_initialized("bitsandbytes")
+ if len(problematic_imports) > 0:
+ err = (
+ "Could not start distributed process. Libraries known to initialize CUDA upon import have been "
+ "imported already. Please keep these imports inside your training function to try and help with this:"
+ )
+ for lib_name in problematic_imports:
+ err += f"\n\t* `{lib_name}`"
+ raise RuntimeError(err)
+
+ patched_env = dict(
+ nproc=num_processes,
+ node_rank=node_rank,
+ world_size=num_nodes * num_processes,
+ master_addr=master_addr,
+ master_port=use_port,
+ mixed_precision=mixed_precision,
+ )
+
+ # Check for CUDA P2P and IB issues
+ if not check_cuda_p2p_ib_support():
+ patched_env["nccl_p2p_disable"] = "1"
+ patched_env["nccl_ib_disable"] = "1"
+
+            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
+            # process here (the other ones will be set by the launcher).
+ with patch_environment(**patched_env):
+ # First dummy launch
+ if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":
+ launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU")
+ try:
+ start_processes(launcher, args=(), nprocs=num_processes, start_method="fork")
+ except ProcessRaisedException as e:
+ err = "An issue was found when verifying a stable environment for the notebook launcher."
+ if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
+ raise RuntimeError(
+ f"{err}"
+ "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
+ "Please review your imports and test them when running the `notebook_launcher()` to identify "
+ "which one is problematic and causing CUDA to be initialized."
+ ) from e
+ else:
+ raise RuntimeError(f"{err} The following error was raised: {e}") from e
+ # Now the actual launch
+ launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
+ print(f"Launching training on {num_processes} GPUs.")
+ try:
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
+ except ProcessRaisedException as e:
+ if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
+ raise RuntimeError(
+ "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
+ "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
+ "Please review your imports and test them when running the `notebook_launcher()` to identify "
+ "which one is problematic and causing CUDA to be initialized."
+ ) from e
+ else:
+ raise RuntimeError(f"An issue was found when launching the training: {e}") from e
+
+ else:
+ # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
+ if is_mps_available():
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+ print("Launching training on MPS.")
+ elif torch.cuda.is_available():
+ print("Launching training on one GPU.")
+ else:
+ print("Launching training on CPU.")
+ function(*args)
+
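+# A hedged multi-node sketch (addresses and ranks are placeholders; the same call has to
+# be issued on every node, each with its own `node_rank`):
+#
+#     notebook_launcher(
+#         train,
+#         args=(arg1, arg2),
+#         num_processes=8,          # processes per node
+#         master_addr="10.0.0.1",   # hostname or IP of the rank-0 node
+#         node_rank=0,              # 0 on the first node, 1 on the second, ...
+#         num_nodes=2,
+#     )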
+
+def debug_launcher(function, args=(), num_processes=2):
+ """
+ Launches a training function using several processes on CPU for debugging purposes.
+
+
+
+ This function is provided for internal testing and debugging, but it's not intended for real trainings. It will
+ only use the CPU.
+
+
+
+ Args:
+ function (`Callable`):
+ The training function to execute.
+ args (`Tuple`):
+ Tuple of arguments to pass to the function (it will receive `*args`).
+ num_processes (`int`, *optional*, defaults to 2):
+ The number of processes to use for training.
+ """
+ from torch.multiprocessing import start_processes
+
+ with tempfile.NamedTemporaryFile() as tmp_file:
+        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
+        # process here (the other ones will be set by the launcher).
+ with patch_environment(
+ world_size=num_processes,
+ master_addr="127.0.0.1",
+ master_port="29500",
+ accelerate_mixed_precision="no",
+ accelerate_debug_rdv_file=tmp_file.name,
+ accelerate_use_cpu="yes",
+ ):
+ launcher = PrepareForLaunch(function, debug=True)
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
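+
+
+# A hedged usage sketch for quick CPU-only checks of a training function:
+#
+#     def training_function():
+#         state = PartialState()
+#         print(f"Hello from process {state.process_index} of {state.num_processes}")
+#
+#     debug_launcher(training_function, num_processes=2)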
diff --git a/venv/lib/python3.10/site-packages/accelerate/local_sgd.py b/venv/lib/python3.10/site-packages/accelerate/local_sgd.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f2657fcc8b057b4396cf299e6cf681fa7b83aa8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/local_sgd.py
@@ -0,0 +1,102 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+
+from accelerate import Accelerator, DistributedType
+
+
+class LocalSGD:
+ """
+    A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently
+    on each device, then averages the model weights across devices every K steps.
+
+ It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular,
+ this is a simple implementation that cannot support scenarios such as model parallelism.
+
+
+ Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
+ back to at least:
+
+ Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
+ arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)
+
+ We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).
+
+ Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
+ Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
+
+ """
+
+ def __enter__(self):
+ if self.enabled:
+ self.model_sync_obj = self.model.no_sync()
+ self.model_sync_obj.__enter__()
+
+ return self
+
+ def __exit__(self, type, value, tb):
+ if self.enabled:
+ # Average all models on exit
+ self._sync_and_avg_model_params()
+ self.model_sync_obj.__exit__(type, value, tb)
+
+ def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
+ """
+ Constructor.
+
+ Args:
+            model (`torch.nn.Module`):
+ The model whose parameters we need to average.
+ accelerator (`Accelerator`):
+ Accelerator object.
+ local_sgd_steps (`int`):
+ A number of local SGD steps (before model parameters are synchronized).
+            enabled (`bool`):
+                Local SGD is disabled if this parameter is set to `False`.
+ """
+ if accelerator.distributed_type not in [
+ DistributedType.NO,
+ DistributedType.MULTI_CPU,
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ ]:
+ raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
+ self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
+ self.num_steps = 0
+ if self.enabled:
+ self.accelerator = accelerator
+ self.model = model
+ self.local_sgd_steps = local_sgd_steps
+
+ def step(self):
+ """
+ This function makes a "step" and synchronizes model parameters if necessary.
+ """
+ self.num_steps += 1
+ if not self.enabled:
+ return
+
+ if self.num_steps % self.local_sgd_steps == 0:
+ self._sync_and_avg_model_params()
+
+ def _sync_and_avg_model_params(self):
+ """
+ Synchronize + Average model parameters across all GPUs
+ """
+
+ self.accelerator.wait_for_everyone()
+ with self.accelerator.autocast():
+ for param in self.model.parameters():
+ param.data = self.accelerator.reduce(param.data, reduction="mean")
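+
+
+# A hedged training-loop sketch (`model`, `optimizer` and `dataloader` are placeholders and
+# are assumed to have gone through `accelerator.prepare` beforehand):
+#
+#     with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
+#         for batch in dataloader:
+#             output = model(**batch)
+#             accelerator.backward(output.loss)
+#             optimizer.step()
+#             optimizer.zero_grad()
+#             local_sgd.step()  # averages parameters every `local_sgd_steps` steps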
diff --git a/venv/lib/python3.10/site-packages/accelerate/logging.py b/venv/lib/python3.10/site-packages/accelerate/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebb8c1eb830e54e3f2870cb3a84afd33b7631ea6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/logging.py
@@ -0,0 +1,123 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import logging
+import os
+
+from .state import PartialState
+
+
+class MultiProcessAdapter(logging.LoggerAdapter):
+ """
+ An adapter to assist with logging in multiprocess.
+
+ `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
+ or only the main executed one. Default is `main_process_only=True`.
+
+ Does not require an `Accelerator` object to be created first.
+ """
+
+ @staticmethod
+ def _should_log(main_process_only):
+ "Check if log should be performed"
+ state = PartialState()
+ return not main_process_only or (main_process_only and state.is_main_process)
+
+ def log(self, level, msg, *args, **kwargs):
+ """
+ Delegates logger call after checking if we should log.
+
+        Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
+        or only the main one. Defaults to `True` if not passed.
+
+ Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
+ read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
+ break with the previous behavior.
+
+ `in_order` is ignored if `main_process_only` is passed.
+ """
+ if PartialState._shared_state == {}:
+ raise RuntimeError(
+ "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
+ )
+ main_process_only = kwargs.pop("main_process_only", True)
+ in_order = kwargs.pop("in_order", False)
+
+ if self.isEnabledFor(level):
+ if self._should_log(main_process_only):
+ msg, kwargs = self.process(msg, kwargs)
+ self.logger.log(level, msg, *args, **kwargs)
+
+ elif in_order:
+ state = PartialState()
+ for i in range(state.num_processes):
+ if i == state.process_index:
+ msg, kwargs = self.process(msg, kwargs)
+ self.logger.log(level, msg, *args, **kwargs)
+ state.wait_for_everyone()
+
+ @functools.lru_cache(None)
+ def warning_once(self, *args, **kwargs):
+ """
+ This method is identical to `logger.warning()`, but will emit the warning with the same message only once
+
+ Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
+        cache. The assumption here is that all warning messages are unique across the code. If they aren't, we would
+        need to switch to another type of cache that includes the caller frame information in the hashing function.
+ """
+ self.warning(*args, **kwargs)
+
+
+def get_logger(name: str, log_level: str = None):
+ """
+ Returns a `logging.Logger` for `name` that can handle multiprocessing.
+
+ If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all
+ processes and in order, also pass `in_order=True`
+
+ Args:
+ name (`str`):
+ The name for the logger, such as `__file__`
+ log_level (`str`, *optional*):
+            The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, and
+            if that is not set either, the logger's level is left unchanged.
+
+ Example:
+
+ ```python
+ >>> from accelerate.logging import get_logger
+ >>> from accelerate import Accelerator
+
+ >>> logger = get_logger(__name__)
+
+ >>> accelerator = Accelerator()
+ >>> logger.info("My log", main_process_only=False)
+ >>> logger.debug("My log", main_process_only=True)
+
+ >>> logger = get_logger(__name__, log_level="DEBUG")
+ >>> logger.info("My log")
+ >>> logger.debug("My second log")
+
+ >>> array = ["a", "b", "c", "d"]
+ >>> letter_at_rank = array[accelerator.process_index]
+ >>> logger.info(letter_at_rank, in_order=True)
+ ```
+ """
+ if log_level is None:
+ log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
+ logger = logging.getLogger(name)
+ if log_level is not None:
+ logger.setLevel(log_level.upper())
+ logger.root.setLevel(log_level.upper())
+ return MultiProcessAdapter(logger, {})
diff --git a/venv/lib/python3.10/site-packages/accelerate/memory_utils.py b/venv/lib/python3.10/site-packages/accelerate/memory_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa2e2c8b9d7d0064c3e5e282737a7ad6919bde29
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/memory_utils.py
@@ -0,0 +1,22 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+
+
+warnings.warn(
+    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
+ "`from accelerate import find_executable_batch_size` to avoid this warning.",
+ FutureWarning,
+)
diff --git a/venv/lib/python3.10/site-packages/accelerate/optimizer.py b/venv/lib/python3.10/site-packages/accelerate/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2fc3e9f1b7592b29ed18ce1ce78a0859286f438
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/optimizer.py
@@ -0,0 +1,193 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import warnings
+
+import torch
+
+from .state import AcceleratorState, GradientState
+from .utils import DistributedType, honor_type, is_torch_xla_available
+
+
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+
+def move_to_device(state, device):
+ if isinstance(state, (list, tuple)):
+ return honor_type(state, (move_to_device(t, device) for t in state))
+ elif isinstance(state, dict):
+ return type(state)({k: move_to_device(v, device) for k, v in state.items()})
+ elif isinstance(state, torch.Tensor):
+ return state.to(device)
+ return state
+
+
+class AcceleratedOptimizer(torch.optim.Optimizer):
+ """
+ Internal wrapper around a torch optimizer.
+
+ Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
+ accumulation.
+
+ Args:
+ optimizer (`torch.optim.optimizer.Optimizer`):
+ The optimizer to wrap.
+ device_placement (`bool`, *optional*, defaults to `True`):
+ Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
+ `optimizer` on the right device.
+ scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
+ The scaler to use in the step function if training with mixed precision.
+ """
+
+ def __init__(self, optimizer, device_placement=True, scaler=None):
+ self.optimizer = optimizer
+ self.scaler = scaler
+ self.accelerator_state = AcceleratorState()
+ self.gradient_state = GradientState()
+ self.device_placement = device_placement
+ self._is_overflow = False
+
+ if self.scaler is not None:
+ self._accelerate_step_called = False
+ self._optimizer_original_step_method = self.optimizer.step
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
+
+ # Handle device placement
+ if device_placement:
+ state_dict = self.optimizer.state_dict()
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
+ else:
+ state_dict = move_to_device(state_dict, self.accelerator_state.device)
+ self.optimizer.load_state_dict(state_dict)
+
+ @property
+ def state(self):
+ return self.optimizer.state
+
+ @state.setter
+ def state(self, state):
+ self.optimizer.state = state
+
+ @property
+ def param_groups(self):
+ return self.optimizer.param_groups
+
+ @param_groups.setter
+ def param_groups(self, param_groups):
+ self.optimizer.param_groups = param_groups
+
+ @property
+ def defaults(self):
+ return self.optimizer.defaults
+
+ @defaults.setter
+ def defaults(self, defaults):
+ self.optimizer.defaults = defaults
+
+ def add_param_group(self, param_group):
+ self.optimizer.add_param_group(param_group)
+
+ def load_state_dict(self, state_dict):
+ if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement:
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
+ self.optimizer.load_state_dict(state_dict)
+
+ def state_dict(self):
+ return self.optimizer.state_dict()
+
+ def zero_grad(self, set_to_none=None):
+ if self.gradient_state.sync_gradients:
+ accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
+ if accept_arg:
+ if set_to_none is None:
+ set_to_none = True
+ self.optimizer.zero_grad(set_to_none=set_to_none)
+ else:
+ if set_to_none is not None:
+                    raise ValueError("`set_to_none` for `Optimizer.zero_grad` is not supported by this optimizer.")
+ self.optimizer.zero_grad()
+
+ def step(self, closure=None):
+ if (
+ not self.gradient_state.is_xla_gradients_synced
+ and self.accelerator_state.distributed_type == DistributedType.XLA
+ ):
+ gradients = xm._fetch_gradients(self.optimizer)
+ xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
+ self.gradient_state.is_xla_gradients_synced = True
+ if self.gradient_state.sync_gradients:
+ if self.scaler is not None:
+ self.optimizer.step = self._optimizer_patched_step_method
+
+ self.scaler.step(self.optimizer, closure)
+ self.scaler.update()
+
+ if not self._accelerate_step_called:
+ # If the optimizer step was skipped, gradient overflow was detected.
+ self._is_overflow = True
+ else:
+ self._is_overflow = False
+ # Reset the step method to the original one
+ self.optimizer.step = self._optimizer_original_step_method
+ # Reset the indicator
+ self._accelerate_step_called = False
+ else:
+ self.optimizer.step(closure)
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
+ self.gradient_state.is_xla_gradients_synced = False
+
+ def _switch_parameters(self, parameters_map):
+ for param_group in self.optimizer.param_groups:
+ param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
+
+ @property
+ def is_overflow(self):
+ """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
+ warnings.warn(
+            "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate. Use "
+ "`optimizer.step_was_skipped` instead.",
+ FutureWarning,
+ )
+ return self._is_overflow
+
+ @property
+ def step_was_skipped(self):
+ """Whether or not the optimizer step was skipped."""
+ return self._is_overflow
+
+ def __getstate__(self):
+ _ignored_keys = [
+ "_accelerate_step_called",
+ "_optimizer_original_step_method",
+ "_optimizer_patched_step_method",
+ ]
+ return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ if self.scaler is not None:
+ self._accelerate_step_called = False
+ self._optimizer_original_step_method = self.optimizer.step
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
+
+
+def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
+ def patched_step(*args, **kwargs):
+ accelerated_optimizer._accelerate_step_called = True
+ return method(*args, **kwargs)
+
+ return patched_step
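+
+
+# A hedged sketch of the wrapping that `Accelerator.prepare` normally performs for you,
+# shown only to illustrate the wrapper's behaviour:
+#
+#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
+#     optimizer = AcceleratedOptimizer(optimizer, device_placement=True, scaler=None)
+#     optimizer.step()                    # only steps while gradients are being synced
+#     print(optimizer.step_was_skipped)   # True if a mixed-precision overflow skipped the step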
diff --git a/venv/lib/python3.10/site-packages/accelerate/scheduler.py b/venv/lib/python3.10/site-packages/accelerate/scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fa8a13f238afd7b908ee8e8cb8e0620f48d4ff8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/scheduler.py
@@ -0,0 +1,98 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
+
+import warnings
+
+from .state import AcceleratorState, GradientState
+
+
+warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
+
+
+class AcceleratedScheduler:
+ """
+ A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
+    to avoid making a scheduler step too fast when gradients overflowed and there was no training step (in mixed
+    precision training).
+
+    When performing gradient accumulation, scheduler lengths should not be changed accordingly: Accelerate will always
+    step the scheduler to account for it.
+
+ Args:
+ scheduler (`torch.optim.lr_scheduler._LRScheduler`):
+ The scheduler to wrap.
+ optimizers (one or a list of `torch.optim.Optimizer`):
+ The optimizers used.
+ step_with_optimizer (`bool`, *optional*, defaults to `True`):
+ Whether or not the scheduler should be stepped at each optimizer step.
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether or not the dataloaders split one batch across the different processes (so batch size is the same
+ regardless of the number of processes) or create batches on each process (so batch size is the original
+ batch size multiplied by the number of processes).
+ """
+
+ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
+ self.scheduler = scheduler
+ self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
+ self.split_batches = split_batches
+ self.step_with_optimizer = step_with_optimizer
+ self.gradient_state = GradientState()
+
+ def step(self, *args, **kwargs):
+ if not self.step_with_optimizer:
+ # No link between scheduler and optimizer -> just step
+ self.scheduler.step(*args, **kwargs)
+ return
+
+ # Otherwise, first make sure the optimizer was stepped.
+ if not self.gradient_state.sync_gradients:
+ if self.gradient_state.adjust_scheduler:
+ self.scheduler._step_count += 1
+ return
+
+ for opt in self.optimizers:
+ if opt.step_was_skipped:
+ return
+ if self.split_batches:
+ # Split batches -> the training dataloader batch size is not changed so one step per training step
+ self.scheduler.step(*args, **kwargs)
+ else:
+ # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
+ # num_processes steps per training step
+ num_processes = AcceleratorState().num_processes
+ for _ in range(num_processes):
+ # Special case when using OneCycle and `drop_last` was not used
+ if hasattr(self.scheduler, "total_steps"):
+ if self.scheduler._step_count <= self.scheduler.total_steps:
+ self.scheduler.step(*args, **kwargs)
+ else:
+ self.scheduler.step(*args, **kwargs)
+
+ # Passthroughs
+ def get_last_lr(self):
+ return self.scheduler.get_last_lr()
+
+ def state_dict(self):
+ return self.scheduler.state_dict()
+
+ def load_state_dict(self, state_dict):
+ self.scheduler.load_state_dict(state_dict)
+
+ def get_lr(self):
+ return self.scheduler.get_lr()
+
+ def print_lr(self, *args, **kwargs):
+ return self.scheduler.print_lr(*args, **kwargs)
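+
+
+# A hedged sketch (this wrapper is normally created by `Accelerator.prepare`; `optimizer`
+# is assumed to already be an `AcceleratedOptimizer`):
+#
+#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer.optimizer, step_size=10)
+#     lr_scheduler = AcceleratedScheduler(lr_scheduler, optimizers=optimizer)
+#     lr_scheduler.step()  # steps `num_processes` times unless split_batches=True, and is
+#                          # skipped entirely if the wrapped optimizer skipped its step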
diff --git a/venv/lib/python3.10/site-packages/accelerate/state.py b/venv/lib/python3.10/site-packages/accelerate/state.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d65c5a1314bd9cdb8013808f1540184ba08b12b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/state.py
@@ -0,0 +1,1209 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import logging
+import math
+import os
+import threading
+import warnings
+from contextlib import contextmanager
+from functools import partial
+from typing import Any, Callable, Optional
+
+import torch
+
+from .utils import (
+ DistributedType,
+ DynamoBackend,
+ GradientAccumulationPlugin,
+ check_cuda_p2p_ib_support,
+ check_fp8_capability,
+ get_ccl_version,
+ get_cpu_distributed_information,
+ get_int_from_env,
+ is_ccl_available,
+ is_datasets_available,
+ is_deepspeed_available,
+ is_fp8_available,
+ is_ipex_available,
+ is_mlu_available,
+ is_mps_available,
+ is_npu_available,
+ is_torch_xla_available,
+ is_xpu_available,
+ parse_choice_from_env,
+ parse_flag_from_env,
+ set_numa_affinity,
+)
+from .utils.dataclasses import SageMakerDistributedType
+
+
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+if is_mlu_available(check_device=False):
+ import torch_mlu # noqa: F401
+
+if is_npu_available(check_device=False):
+ import torch_npu # noqa: F401
+
+logger = logging.getLogger(__name__)
+
+
+def is_initialized() -> bool:
+ """
+ Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
+ but works as a module method.
+ """
+ return AcceleratorState._shared_state != {}
+
+
+# Lambda function that does nothing
+def do_nothing(*args, **kwargs):
+ return None
+
+
+class ThreadLocalSharedDict(threading.local):
+ """
+ Descriptor that holds a dict shared between instances of a class in the same thread.
+
+ Note: Descriptors have slightly different semantics than just a dict field on its own.
+ `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
+ underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
+ the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
+    object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
+
+ See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
+
+ This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
+
+ See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
+ """
+
+ def __init__(self, thread_local: bool = False):
+ self._storage = {}
+
+ def __get__(self, obj, objtype=None):
+ return self._storage
+
+ def __set__(self, obj, value):
+ self._storage = value
+
+
+# Prefer global shared dictionary, except when using TPU.
+SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
+
+
+# Inspired by Alex Martelli's 'Borg'.
+class PartialState:
+ """
+ Singleton class that has information about the current training environment and functions to help with process
+ control. Designed to be used when only process control and device execution states are needed. Does *not* need to
+ be initialized from `Accelerator`.
+
+ Args:
+ cpu (`bool`, *optional*):
+ Whether or not to force the script to execute on CPU. Will ignore any accelerators available if set to
+ `True` and force the execution on the CPU.
+ kwargs (additional keyword arguments, *optional*):
+            Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
+ found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.
+
+ **Available attributes:**
+
+ - **device** (`torch.device`) -- The device to use.
+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
+ in use.
+ - **local_process_index** (`int`) -- The index of the current process on the current server.
+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
+      of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16' or 'fp8'.)
+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.
+ - **process_index** (`int`) -- The index of the current process.
+ - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
+ - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
+ - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
+ - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
+
+ Example:
+ ```python
+ from accelerate.utils import InitProcessGroupKwargs
+
+ # To include `InitProcessGroupKwargs`, init then call `.to_kwargs()`
+ kwargs = InitProcessGroupKwargs(...).to_kwargs()
+ state = PartialState(**kwargs)
+ ```
+ """
+
+ _shared_state = SharedDict()
+ _known_attrs = [
+ "_cpu",
+ "_mixed_precision",
+ "_shared_state",
+ "backend",
+ "debug",
+ "device",
+ "distributed_type",
+ "fork_launched",
+ "local_process_index",
+ "num_processes",
+ "process_index",
+ ]
+
+ def __init__(self, cpu: bool = False, **kwargs):
+ self.__dict__ = self._shared_state
+ if not self.initialized:
+ self._cpu = cpu
+ self.backend = None
+ env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
+ self.device = torch.device(env_device) if env_device is not None else None
+ self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
+ use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
+ dist_information = None
+ if use_sagemaker_dp is None:
+ use_sagemaker_dp = (
+ os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
+ and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
+ )
+
+ # Sets up self.backend + imports
+ original_backend = kwargs.pop("backend", None)
+ backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, original_backend)
+ if original_backend is not None and backend != original_backend:
+                raise ValueError(f"Your assigned backend {original_backend} is not available, please use {backend}")
+ self.backend = backend
+ self.distributed_type = distributed_type
+ use_deepspeed = False
+ if not cpu and self.backend != "xla":
+ if int(os.environ.get("LOCAL_RANK", -1)) != -1:
+ # Deal with spawning deepspeed
+ if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true":
+ if not is_deepspeed_available():
+ raise ImportError(
+ "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
+ )
+ from deepspeed import comm as dist
+
+ if is_xpu_available() and is_ccl_available():
+ os.environ["CCL_PROCESS_LAUNCHER"] = "none"
+ os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
+ os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
+
+ if not dist.is_initialized():
+ dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
+ # We need to flag to `use_deepspeed` to be True to override `distributed_type` later
+ use_deepspeed = True
+ # Deal with all other backends but XPU and CPU, that gets handled special later
+ elif (
+ self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU)
+ and not torch.distributed.is_initialized()
+ ):
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
+ # XPU and CPU require special env configs to be set
+ if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
+ dist_information = get_cpu_distributed_information()
+ os.environ["RANK"] = str(dist_information.rank)
+ os.environ["WORLD_SIZE"] = str(dist_information.world_size)
+ os.environ["LOCAL_RANK"] = str(dist_information.local_rank)
+ os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size)
+ if self.backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU:
+ os.environ["CCL_PROCESS_LAUNCHER"] = "none"
+ os.environ["CCL_LOCAL_SIZE"] = os.environ["LOCAL_WORLD_SIZE"]
+ os.environ["CCL_LOCAL_RANK"] = os.environ["LOCAL_RANK"]
+ if not os.environ.get("MASTER_PORT", None):
+ os.environ["MASTER_PORT"] = "29500"
+ if (
+ not os.environ.get("MASTER_ADDR", None)
+ and dist_information.local_world_size != dist_information.world_size
+ and self.backend != "mpi"
+ ):
+ raise ValueError(
+ "Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, "
+ "please try exporting rank 0's hostname as `MASTER_ADDR`"
+ )
+ kwargs["rank"] = dist_information.rank
+ kwargs["world_size"] = dist_information.world_size
+
+ if (
+ self.distributed_type == DistributedType.MULTI_CPU
+                    and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
+ ):
+ import psutil
+
+ num_cpu_threads_per_process = int(
+ psutil.cpu_count(logical=False) / dist_information.local_world_size
+ )
+ if num_cpu_threads_per_process == 0:
+ num_cpu_threads_per_process = 1
+ torch.set_num_threads(num_cpu_threads_per_process)
+ warnings.warn(
+ f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob"
+ " performance."
+ )
+
+ if not torch.distributed.is_initialized():
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
+
+ # No backend == no distributed training
+ if self.backend is None:
+ self.distributed_type = DistributedType.NO
+ self.num_processes = 1
+ self.process_index = 0
+ self.local_process_index = 0
+ elif self.backend == "xla":
+ # XLA needs device setting first for `set_replication`
+ self.set_device()
+ xm.set_replication(self.device, xm.get_xla_supported_devices())
+ self.num_processes = xm.xrt_world_size()
+ self.process_index = xm.get_ordinal()
+ if is_torch_xla_available(check_is_tpu=True):
+ self.local_process_index = xm.get_local_ordinal()
+ else:
+ self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
+ else:
+ self.num_processes = torch.distributed.get_world_size()
+ self.process_index = torch.distributed.get_rank()
+ self.local_process_index = (
+ int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank
+ )
+ self.set_device()
+            # Now we can change to DeepSpeed
+ if use_deepspeed:
+ self.distributed_type = DistributedType.DEEPSPEED
+
+ # Set CPU affinity if enabled
+ if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False):
+ set_numa_affinity(self.local_process_index)
+
+ # Check for old RTX 4000's that can't use P2P or IB and are on old drivers
+ if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
+ if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
+ raise NotImplementedError(
+                        "The RTX 4000 series doesn't support faster communication via P2P or IB. "
+                        'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1"` or use `accelerate launch` which '
+ "will do this automatically."
+ )
+            # Important: This should be the *only* code outside of `self.initialized`!
+ self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
+
+ def __repr__(self) -> str:
+ return (
+ f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
+ f"Num processes: {self.num_processes}\n"
+ f"Process index: {self.process_index}\n"
+ f"Local process index: {self.local_process_index}\n"
+ f"Device: {self.device}\n"
+ )
+
+ @staticmethod
+ def _reset_state():
+ "Resets `_shared_state`, is used internally and should not be called"
+ PartialState._shared_state.clear()
+
+ @property
+ def initialized(self) -> bool:
+ "Returns whether the `PartialState` has been initialized"
+ return self._shared_state != {}
+
+ @property
+ def use_distributed(self):
+ """
+ Whether the Accelerator is configured for distributed training
+ """
+ return self.distributed_type != DistributedType.NO and self.num_processes > 1
+
+ @property
+ def is_last_process(self) -> bool:
+ "Returns whether the current process is the last one"
+ return self.process_index == self.num_processes - 1
+
+ @property
+ def is_main_process(self) -> bool:
+ "Returns whether the current process is the main process"
+ return (
+ self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
+ )
+
+ @property
+ def is_local_main_process(self) -> bool:
+ "Returns whether the current process is the main process on the local node"
+ return (
+ self.local_process_index == 0
+ if self.distributed_type != DistributedType.MEGATRON_LM
+ else self.is_last_process
+ )
+
+ def wait_for_everyone(self):
+ """
+ Will stop the execution of the current process until every other process has reached that point (so this does
+ nothing when the script is only run in one process). Useful to do before saving a model.
+
+ Example:
+
+ ```python
+ >>> # Assuming two GPU processes
+ >>> import time
+ >>> from accelerate.state import PartialState
+
+ >>> state = PartialState()
+ >>> if state.is_main_process:
+ ... time.sleep(2)
+        ... else:
+ ... print("I'm waiting for the main process to finish its sleep...")
+ >>> state.wait_for_everyone()
+ >>> # Should print on every process at the same time
+ >>> print("Everyone is here")
+ ```
+ """
+ if self.distributed_type in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_XPU,
+ DistributedType.MULTI_CPU,
+ DistributedType.DEEPSPEED,
+ DistributedType.FSDP,
+ ):
+ torch.distributed.barrier()
+ elif self.distributed_type == DistributedType.XLA:
+ xm.rendezvous("accelerate.utils.wait_for_everyone")
+
+ def _goes_first(self, is_main: bool):
+ if not is_main:
+ self.wait_for_everyone()
+
+ yield
+
+ if is_main:
+ self.wait_for_everyone()
+
+ @contextmanager
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
+ """
+ Splits `inputs` between `self.num_processes` quickly so each process can then use its own slice. Useful when doing
+ distributed inference, such as with different prompts.
+
+ Note that when using a `dict`, all keys need to have the same number of elements.
+
+ Args:
+ inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`):
+ The input to split between processes.
+ apply_padding (`bool`, `optional`, defaults to `False`):
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
+ number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
+ in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
+
+
+ Example:
+
+ ```python
+ # Assume there are two processes
+ from accelerate import PartialState
+
+ state = PartialState()
+ with state.split_between_processes(["A", "B", "C"]) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C"]
+
+ with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C", "C"]
+ ```
+ """
+ if self.num_processes == 1:
+ yield inputs
+ return
+ length = len(inputs)
+ # Nested dictionary of any types
+ if isinstance(inputs, dict):
+ length = len(inputs[list(inputs.keys())[0]])
+ if not all(len(v) == length for v in inputs.values()):
+ raise ValueError("All values in the dictionary must have the same length")
+ num_samples_per_process = math.ceil(length / self.num_processes)
+ start_index = self.process_index * num_samples_per_process
+ end_index = start_index + num_samples_per_process
+ if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
+ end_index = length
+
+ def _split_values(inputs, start_index, end_index):
+ if isinstance(inputs, (list, tuple, torch.Tensor)):
+ if start_index >= len(inputs):
+ result = inputs[-1:]
+ else:
+ result = inputs[start_index:end_index]
+ if apply_padding:
+ if isinstance(result, torch.Tensor):
+ from accelerate.utils import pad_across_processes, send_to_device
+
+ # The tensor needs to be on the device before we can pad it
+ tensorized_result = send_to_device(result, self.device)
+ result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
+ else:
+ result += [result[-1]] * (num_samples_per_process - len(result))
+ return result
+ elif isinstance(inputs, dict):
+ for key in inputs.keys():
+ inputs[key] = _split_values(inputs[key], start_index, end_index)
+ return inputs
+ else:
+ if is_datasets_available():
+ from datasets import Dataset
+
+ if isinstance(inputs, Dataset):
+ if start_index >= len(inputs):
+ start_index = len(inputs) - 1
+ if end_index > len(inputs):
+ end_index = len(inputs)
+ result_idcs = list(range(start_index, end_index))
+ if apply_padding:
+ result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs))
+ return inputs.select(result_idcs)
+ return inputs
+
+ yield _split_values(inputs, start_index, end_index)
+
+ @contextmanager
+ def main_process_first(self):
+ """
+ Lets the main process go first inside a with block.
+
+ The other processes will enter the with block after the main process exits.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> with accelerator.main_process_first():
+ ... # This will be printed first by process 0 then in a seemingly
+ ... # random order by the other processes.
+ ... print(f"This will be printed by process {accelerator.process_index}")
+ ```
+ """
+ yield from self._goes_first(self.is_main_process)
+
+ @contextmanager
+ def local_main_process_first(self):
+ """
+ Lets the local main process go first inside a with block.
+
+ The other processes will enter the with block after the main process exits.
+
+ Example:
+
+ ```python
+ >>> from accelerate.state import PartialState
+
+ >>> state = PartialState()
+ >>> with state.local_main_process_first():
+ ... # This will be printed first by local process 0 then in a seemingly
+ ... # random order by the other processes.
+ ... print(f"This will be printed by process {state.local_process_index}")
+ ```
+ """
+ yield from self._goes_first(self.is_local_main_process)
+
+ def on_main_process(self, function: Callable[..., Any] = None):
+ """
+ Decorator that only runs the decorated function on the main process.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+
+ ```python
+ >>> from accelerate.state import PartialState
+
+ >>> state = PartialState()
+
+
+ >>> @state.on_main_process
+ ... def print_something():
+ ... print("This will be printed by process 0 only.")
+
+
+ >>> print_something()
+ "This will be printed by process 0 only"
+ ```
+ """
+ if not self.initialized:
+ raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.")
+ if self.is_main_process or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def on_local_main_process(self, function: Callable[..., Any] = None):
+ """
+ Decorator that only runs the decorated function on the local main process.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+ ```python
+ # Assume we have 2 servers with 4 processes each.
+ from accelerate.state import PartialState
+
+ state = PartialState()
+
+
+ @state.on_local_main_process
+ def print_something():
+ print("This will be printed by process 0 only on each server.")
+
+
+ print_something()
+ # On server 1:
+ "This will be printed by process 0 only"
+ # On server 2:
+ "This will be printed by process 0 only"
+ ```
+ """
+ if self.is_local_main_process or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def on_last_process(self, function: Callable[..., Any]):
+ """
+ Decorator that only runs the decorated function on the last process.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+ ```python
+ # Assume we have 4 processes.
+ from accelerate.state import PartialState
+
+ state = PartialState()
+
+
+ @state.on_last_process
+ def print_something():
+ print(f"Printed on process {state.process_index}")
+
+
+ print_something()
+ "Printed on process 3"
+ ```
+ """
+ if self.is_last_process or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
+ """
+ Decorator that only runs the decorated function on the process with the given index.
+
+ Args:
+ function (`Callable`, `optional`):
+ The function to decorate.
+ process_index (`int`, `optional`):
+ The index of the process on which to run the function.
+
+ Example:
+ ```python
+ # Assume we have 4 processes.
+ from accelerate.state import PartialState
+
+ state = PartialState()
+
+
+ @state.on_process(process_index=2)
+ def print_something():
+ print(f"Printed on process {state.process_index}")
+
+
+ print_something()
+ "Printed on process 2"
+ ```
+ """
+ if function is None:
+ return partial(self.on_process, process_index=process_index)
+ if (self.process_index == process_index) or (not self.use_distributed):
+ return function
+ return do_nothing
+
+ def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
+ """
+ Decorator that only runs the decorated function on the process with the given index on the current node.
+
+ Args:
+ function (`Callable`, *optional*):
+ The function to decorate.
+ local_process_index (`int`, *optional*):
+ The index of the local process on which to run the function.
+
+ Example:
+ ```python
+ # Assume we have 2 servers with 4 processes each.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_local_process(local_process_index=2)
+ def print_something():
+ print(f"Printed on process {accelerator.local_process_index}")
+
+
+ print_something()
+ # On server 1:
+ "Printed on process 2"
+ # On server 2:
+ "Printed on process 2"
+ ```
+ """
+ if function is None:
+ return partial(self.on_local_process, local_process_index=local_process_index)
+ if (self.local_process_index == local_process_index) or (not self.use_distributed):
+ return function
+ return do_nothing
+
+ def print(self, *args, **kwargs):
+ if self.is_local_main_process:
+ print(*args, **kwargs)
+
+ @property
+ def default_device(self) -> torch.device:
+ """
+ Returns the default device, which is:
+ - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
+ - MLU if `is_mlu_available()`
+ - CUDA if `torch.cuda.is_available()`
+ - XPU if `is_xpu_available()`
+ - NPU if `is_npu_available()`
+ - CPU otherwise
+ """
+ if is_mps_available():
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+ return torch.device("mps")
+ elif is_mlu_available():
+ return torch.device("mlu")
+ elif torch.cuda.is_available():
+ return torch.device("cuda")
+ elif is_xpu_available():
+ return torch.device("xpu:0")
+ elif is_npu_available():
+ return torch.device("npu")
+ else:
+ return torch.device("cpu")
+
+ def _prepare_backend(
+ self, cpu: bool = False, sagemaker_dp=False, backend: str = None
+ ) -> tuple[str, DistributedType]:
+ "Prepares any imports needed before initializing the distributed backend and sets `self.backend` properly"
+ distributed_type = None
+ if sagemaker_dp:
+ import smdistributed.dataparallel.torch.torch_smddp # noqa
+
+ backend = "smddp"
+ distributed_type = DistributedType.MULTI_GPU
+ elif is_torch_xla_available():
+ backend = "xla"
+ distributed_type = DistributedType.XLA
+ elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu:
+ if is_mlu_available():
+ backend = "cncl"
+ distributed_type = DistributedType.MULTI_MLU
+ elif torch.cuda.is_available():
+ if backend is None:
+ backend = "nccl"
+ distributed_type = DistributedType.MULTI_GPU
+ elif is_npu_available():
+ backend = "hccl"
+ distributed_type = DistributedType.MULTI_NPU
+
+ if distributed_type is None and (
+ int(os.environ.get("LOCAL_RANK", -1)) != -1
+ or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1
+ ):
+ if not cpu and is_xpu_available():
+ distributed_type = DistributedType.MULTI_XPU
+ else:
+ distributed_type = DistributedType.MULTI_CPU
+
+ if (
+ backend in (None, "ccl")
+ and is_ccl_available()
+ and (get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU)
+ ):
+ if get_ccl_version() >= "1.12":
+ import oneccl_bindings_for_pytorch # noqa: F401
+ else:
+ import torch_ccl # noqa: F401
+
+ backend = "ccl"
+ elif backend in (None, "mpi") and torch.distributed.is_mpi_available():
+ backend = "mpi"
+ else:
+ backend = "gloo"
+ if distributed_type is None:
+ distributed_type = DistributedType.NO
+
+ return backend, distributed_type
+
+ def set_device(self):
+ """
+ Sets the device in `self.device` to the current distributed environment.
+ """
+ if self.device is not None:
+ return
+ if self.distributed_type == DistributedType.NO:
+ self.device = torch.device("cpu") if self._cpu else self.default_device
+ return
+ device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower()
+ if device not in ("cpu", "gpu", "mlu", "npu", "xpu", "xla"):
+ raise ValueError(
+ f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `_set_device()` for it!"
+ )
+ if device == "xla":
+ self.device = xm.xla_device()
+ else:
+ if device == "gpu":
+ device = "cuda"
+ self.device = torch.device(device, self.local_process_index)
+ if self.device is not None:
+ if device == "xpu":
+ torch.xpu.set_device(self.device)
+ elif device == "mlu":
+ torch.mlu.set_device(self.device)
+ elif device == "npu":
+ torch.npu.set_device(self.device)
+ elif device == "cuda":
+ torch.cuda.set_device(self.device)
+
+ def __getattr__(self, name: str):
+ # By this point we know that no attributes of `self` contain `name`,
+ # so we just modify the error message
+ if name in self._known_attrs:
+ raise AttributeError(
+ f"`PartialState` object has no attribute `{name}`. "
+ "This happens if `PartialState._reset_state()` was called and "
+ "an `Accelerator` or `PartialState` was not reinitialized."
+ )
+ # Raise a typical AttributeError
+ raise AttributeError(f"'PartialState' object has no attribute '{name}'")
+
+
+class AcceleratorState:
+ """
+ Singleton class that has information about the current training environment.
+
+ **Available attributes:**
+
+ - **device** (`torch.device`) -- The device to use.
+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
+ in use.
+ - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.
+ - **local_process_index** (`int`) -- The index of the current process on the current server.
+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
+ of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16', or 'fp8').
+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.
+ - **process_index** (`int`) -- The index of the current process.
+ - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
+ - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
+ - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
+ - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
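+
+    Example (a minimal sketch; assumes the state has been populated by creating an `Accelerator` first):
+
+    ```python
+    from accelerate import Accelerator
+    from accelerate.state import AcceleratorState
+
+    accelerator = Accelerator()
+    state = AcceleratorState()
+    if state.is_main_process:
+        print(f"{state.num_processes} process(es), mixed precision: {state.mixed_precision}")
+    ```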
+ """
+
+ _shared_state = SharedDict()
+ _known_attrs = PartialState._known_attrs + [
+ "deepspeed_plugin",
+ "use_ipex",
+ "fsdp_plugin",
+ "megatron_lm_plugin",
+ "dynamo_plugin",
+ ]
+
+ def __init__(
+ self,
+ mixed_precision: str = None,
+ cpu: bool = False,
+ dynamo_plugin=None,
+ deepspeed_plugin=None,
+ fsdp_plugin=None,
+ megatron_lm_plugin=None,
+ _from_accelerator: bool = False,
+ **kwargs,
+ ):
+ self.__dict__ = self._shared_state
+ if parse_flag_from_env("ACCELERATE_USE_CPU"):
+ cpu = True
+ if PartialState._shared_state == {}:
+ PartialState(cpu, **kwargs)
+ self.__dict__.update(PartialState._shared_state)
+ self._check_initialized(mixed_precision, cpu)
+ if not self.initialized:
+ self.deepspeed_plugin = None
+ self.use_ipex = None
+ mixed_precision = (
+ parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no")
+ if mixed_precision is None
+ else mixed_precision.lower()
+ )
+ if mixed_precision == "fp8":
+ if not is_fp8_available():
+ raise ValueError(
+ "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed."
+ )
+ elif not check_fp8_capability():
+ logger.warning(
+ f"The current device has compute capability of {torch.cuda.get_device_capability()} which is "
+ "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace "
+ "or higher, compute capability of 8.9 or higher). Will use FP16 instead."
+ )
+ mixed_precision = "fp16"
+
+ self.dynamo_plugin = dynamo_plugin
+ if not _from_accelerator:
+ raise ValueError(
+ "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` "
+ "before using any functionality from the `accelerate` library."
+ )
+ # deepspeed handles mixed_precision using deepspeed_config
+ self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
+ if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True):
+ if mixed_precision == "bf16":
+ if os.environ.get("ACCELERATE_DOWNCAST_BF16"):
+ os.environ["XLA_USE_BF16"] = str(0)
+ os.environ["XLA_DOWNCAST_BF16"] = str(1)
+ self.downcast_bfloat = True
+ else:
+ os.environ["XLA_USE_BF16"] = str(1)
+ os.environ["XLA_DOWNCAST_BF16"] = str(0)
+ self.downcast_bfloat = False
+ elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
+ self.deepspeed_plugin = deepspeed_plugin
+ elif self.distributed_type in [
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_XPU,
+ ]:
+ if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
+ self.distributed_type = DistributedType.FSDP
+ if self._mixed_precision != "no":
+ fsdp_plugin.set_mixed_precision(self._mixed_precision)
+ self.fsdp_plugin = fsdp_plugin
+ if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" and self.distributed_type not in [
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_XPU,
+ ]:
+ self.distributed_type = DistributedType.MEGATRON_LM
+ megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
+ self.megatron_lm_plugin = megatron_lm_plugin
+ elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
+ if is_ipex_available():
+ # check if user disables it explicitly
+ self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True)
+ else:
+ self.use_ipex = False
+ if (
+ self.dynamo_plugin.backend != DynamoBackend.NO
+ and self._mixed_precision == "no"
+ and self.device.type == "cuda"
+ ):
+ torch.backends.cuda.matmul.allow_tf32 = True
+ PartialState._shared_state["distributed_type"] = self.distributed_type
+
+ @property
+ def initialized(self) -> bool:
+ return self._shared_state != PartialState._shared_state
+
+ def __repr__(self):
+ repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
+ return repr
+
+ def _check_initialized(self, mixed_precision=None, cpu=None):
+ "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
+ if self.initialized:
+ err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`."
+ if cpu and self.device.type != "cpu":
+ raise ValueError(err.format(flag="cpu=True"))
+ if (
+ mixed_precision is not None
+ and mixed_precision != self._mixed_precision
+ and self.distributed_type != DistributedType.DEEPSPEED
+ ):
+ raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
+
+ # For backward compatibility
+ @property
+ def use_fp16(self):
+ warnings.warn(
+ "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
+ "`AcceleratorState.mixed_precision == 'fp16'` instead.",
+ FutureWarning,
+ )
+ return self._mixed_precision != "no"
+
+ @property
+ def mixed_precision(self):
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ config = self.deepspeed_plugin.deepspeed_config
+ if config.get("fp16", {}).get("enabled", False):
+ mixed_precision = "fp16"
+ elif config.get("bf16", {}).get("enabled", False):
+ mixed_precision = "bf16"
+ else:
+ mixed_precision = "no"
+ else:
+ mixed_precision = self._mixed_precision
+ return mixed_precision
+
+ @staticmethod
+ def _reset_state(reset_partial_state: bool = False):
+ "Resets `_shared_state`, is used internally and should not be called"
+ AcceleratorState._shared_state.clear()
+ if reset_partial_state:
+ PartialState._reset_state()
+
+ @property
+ def use_distributed(self):
+ """
+ Whether the Accelerator is configured for distributed training
+ """
+ return PartialState().use_distributed
+
+ @property
+ def is_last_process(self) -> bool:
+ "Returns whether the current process is the last one"
+ return PartialState().is_last_process
+
+ @property
+ def is_main_process(self) -> bool:
+ "Returns whether the current process is the main process"
+ return PartialState().is_main_process
+
+ @property
+ def is_local_main_process(self) -> bool:
+ "Returns whether the current process is the main process on the local node"
+ return PartialState().is_local_main_process
+
+ def wait_for_everyone(self):
+ PartialState().wait_for_everyone()
+
+ @contextmanager
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
+ """
+ Splits `inputs` between `self.num_processes` quickly so each process can then use its own slice. Useful when doing
+ distributed inference, such as with different prompts.
+
+ Note that when using a `dict`, all keys need to have the same number of elements.
+
+ Args:
+ inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
+ The input to split between processes.
+ apply_padding (`bool`, `optional`, defaults to `False`):
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
+ number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
+ in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
+
+
+ Example:
+
+ ```python
+ # Assume there are two processes
+ from accelerate.state import AcceleratorState
+
+ state = AcceleratorState()
+ with state.split_between_processes(["A", "B", "C"]) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C"]
+
+ with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C", "C"]
+ ```
+ """
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
+ yield inputs
+
+ @contextmanager
+ def main_process_first(self):
+ """
+ Lets the main process go first inside a with block.
+
+ The other processes will enter the with block after the main process exits.
+ """
+ with PartialState().main_process_first():
+ yield
+
+ @contextmanager
+ def local_main_process_first(self):
+ """
+ Lets the local main process go first inside a with block.
+
+ The other processes will enter the with block after the main process exits.
+ """
+ with PartialState().local_main_process_first():
+ yield
+
+ def print(self, *args, **kwargs):
+ PartialState().print(*args, **kwargs)
+
+ def __getattr__(self, name: str):
+ # By this point we know that no attributes of `self` contain `name`,
+ # so we just modify the error message
+ if name in self._known_attrs:
+ raise AttributeError(
+ f"`AcceleratorState` object has no attribute `{name}`. "
+ "This happens if `AcceleratorState._reset_state()` was called and "
+ "an `Accelerator` or `PartialState` was not reinitialized."
+ )
+ # Raise a typical AttributeError
+ raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'")
+
+
+class GradientState:
+ """
+ Singleton class that has information related to gradient synchronization for gradient accumulation
+
+ **Available attributes:**
+
+ - **end_of_dataloader** (`bool`) -- Whether we have reached the end of the current dataloader
+ - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
+ - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over
+ - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are
+ being iterated over
+ - **num_steps** (`int`) -- The number of steps to accumulate over
+ - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient
+ accumulation
+ - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader
+ iteration and the number of total steps reset
+ - **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized
+ as false. Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently,
+ after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence
+ is_xla_gradients_synced is always true.
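+
+    Example (a minimal sketch; assumes `GradientAccumulationPlugin` is available from `accelerate.utils` and that the
+    state is populated by creating an `Accelerator` first):
+
+    ```python
+    from accelerate import Accelerator
+    from accelerate.state import GradientState
+    from accelerate.utils import GradientAccumulationPlugin
+
+    accelerator = Accelerator(gradient_accumulation_plugin=GradientAccumulationPlugin(num_steps=2))
+    gradient_state = GradientState()
+    print(gradient_state.num_steps)  # 2
+    print(gradient_state.sync_gradients)  # True until toggled inside `accelerator.accumulate(...)`
+    ```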
+ """
+
+ _shared_state = SharedDict()
+
+ def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
+ self.__dict__ = self._shared_state
+ if not self.initialized:
+ self.sync_gradients = True
+ self.active_dataloader = None
+ self.dataloader_references = [None]
+ self.plugin_kwargs = (
+ gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
+ )
+ self._is_xla_gradients_synced = False
+
+ # Plugin args are different and can be updated
+ if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
+ self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
+
+ @property
+ def num_steps(self) -> int:
+ "Returns the number of steps to accumulate over"
+ return self.plugin_kwargs.get("num_steps", 1)
+
+ @property
+ def adjust_scheduler(self) -> bool:
+ "Returns whether the scheduler should be adjusted"
+ return self.plugin_kwargs.get("adjust_scheduler", False)
+
+ @property
+ def sync_with_dataloader(self) -> bool:
+ "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
+ return self.plugin_kwargs.get("sync_with_dataloader", True)
+
+ @property
+ def initialized(self) -> bool:
+ "Returns whether the `GradientState` has been initialized"
+ return GradientState._shared_state != {}
+
+ @property
+ def end_of_dataloader(self) -> bool:
+ "Returns whether we have reached the end of the current dataloader"
+ if not self.in_dataloader:
+ return False
+ return self.active_dataloader.end_of_dataloader
+
+ @property
+ def remainder(self) -> int:
+ "Returns the number of extra samples that were added from padding the dataloader"
+ if not self.in_dataloader:
+ return -1
+ return self.active_dataloader.remainder
+
+ def __repr__(self):
+ return (
+ f"Sync Gradients: {self.sync_gradients}\n"
+ f"At end of current dataloader: {self.end_of_dataloader}\n"
+ f"Extra samples added: {self.remainder}\n"
+ f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
+ )
+
+ @property
+ def is_xla_gradients_synced(self):
+ "Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true."
+ if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False):
+ return True
+ return self._is_xla_gradients_synced
+
+ @is_xla_gradients_synced.setter
+ def is_xla_gradients_synced(self, is_synced):
+ "Set the _is_xla_gradients_synced attribute."
+ self._is_xla_gradients_synced = is_synced
+
+ def _set_sync_gradients(self, sync_gradients):
+ "Private function that sets whether gradients should be synchronized. Users should not have to call this."
+ self.sync_gradients = sync_gradients
+ # Allow grad-sync to automatically work on TPUs
+ if (
+ self.sync_gradients
+ and is_torch_xla_available(check_is_tpu=True)
+ and PartialState().distributed_type == DistributedType.XLA
+ ):
+ xm.mark_step()
+
+ def _add_dataloader(self, dataloader):
+ "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
+ self.active_dataloader = dataloader
+ self.dataloader_references.append(self.active_dataloader)
+
+ def _remove_dataloader(self, dataloader):
+ "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
+ self.dataloader_references.remove(dataloader)
+ self.active_dataloader = self.dataloader_references[-1]
+
+ @property
+ def in_dataloader(self) -> bool:
+ "Returns whether the current process is in a dataloader"
+ return self.active_dataloader is not None
+
+ @staticmethod
+ def _reset_state():
+ "Resets `_shared_state`, is used internally and should not be called"
+ GradientState._shared_state.clear()
diff --git a/venv/lib/python3.10/site-packages/accelerate/tracking.py b/venv/lib/python3.10/site-packages/accelerate/tracking.py
new file mode 100644
index 0000000000000000000000000000000000000000..5efba19bc6769d9c70ea8b17b8da784b908f529f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/tracking.py
@@ -0,0 +1,1023 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Expectation:
+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}
+
+import json
+import os
+import time
+from functools import wraps
+from typing import Any, Dict, List, Optional, Union
+
+import yaml
+
+from .logging import get_logger
+from .state import PartialState
+from .utils import (
+ LoggerType,
+ is_aim_available,
+ is_clearml_available,
+ is_comet_ml_available,
+ is_dvclive_available,
+ is_mlflow_available,
+ is_tensorboard_available,
+ is_wandb_available,
+ listify,
+)
+
+
+_available_trackers = []
+
+if is_tensorboard_available():
+ _available_trackers.append(LoggerType.TENSORBOARD)
+
+if is_wandb_available():
+ _available_trackers.append(LoggerType.WANDB)
+
+if is_comet_ml_available():
+ _available_trackers.append(LoggerType.COMETML)
+
+if is_aim_available():
+ _available_trackers.append(LoggerType.AIM)
+
+if is_mlflow_available():
+ _available_trackers.append(LoggerType.MLFLOW)
+
+if is_clearml_available():
+ _available_trackers.append(LoggerType.CLEARML)
+
+if is_dvclive_available():
+ _available_trackers.append(LoggerType.DVCLIVE)
+
+logger = get_logger(__name__)
+
+
+def on_main_process(function):
+ """
+ Decorator to selectively run the decorated function on the main process only, based on the `main_process_only`
+ attribute of the class it is used in.
+
+ The check happens at function execution time rather than at initialization time, so it does not trigger the
+ initialization of the `PartialState`.
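+
+    A minimal sketch of how it is applied inside this module (illustrative only):
+
+    ```py
+    class SomeTracker(GeneralTracker):
+        main_process_only = True  # the attribute the decorator checks at call time
+
+        @on_main_process
+        def log(self, values, step=None):
+            ...  # executed on the main process only
+    ```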
+ """
+
+ @wraps(function)
+ def execute_on_main_process(self, *args, **kwargs):
+ if getattr(self, "main_process_only", False):
+ return PartialState().on_main_process(function)(self, *args, **kwargs)
+ else:
+ return function(self, *args, **kwargs)
+
+ return execute_on_main_process
+
+
+def get_available_trackers():
+ "Returns a list of all supported available trackers in the system"
+ return _available_trackers
+
+
+class GeneralTracker:
+ """
+ A base Tracker class to be used for all logging integration implementations.
+
+ Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to
+ [`Accelerator`].
+
+ Should implement `name`, `requires_logging_directory`, and `tracker` properties such that:
+
+ - `name` (`str`): String representation of the tracker class name, such as "TensorBoard".
+ - `requires_logging_directory` (`bool`): Whether the logger requires a directory to store its logs.
+ - `tracker` (`object`): Should return the internal tracking mechanism used by the tracker class (such as the
+ `run` for wandb).
+
+ Implementations can also include a `main_process_only` (`bool`) attribute to toggle whether the relevant logging,
+ init, and other functions should occur on the main process or across all processes (by default `True`).
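+
+    Example (a minimal sketch of a custom tracker; `MyCustomTracker` and its print-based logging are illustrative only):
+
+    ```py
+    from accelerate import Accelerator
+    from accelerate.tracking import GeneralTracker, on_main_process
+
+
+    class MyCustomTracker(GeneralTracker):
+        name = "my_custom_tracker"
+        requires_logging_directory = False
+
+        @on_main_process
+        def __init__(self, run_name: str):
+            super().__init__()
+            self.run_name = run_name
+
+        @property
+        def tracker(self):
+            return None  # return the underlying run object of your logging backend here
+
+        @on_main_process
+        def log(self, values: dict, step=None, **kwargs):
+            print(f"[{self.run_name}] step={step}: {values}")
+
+
+    # Custom tracker instances can be passed directly to the `Accelerator` via `log_with`
+    accelerator = Accelerator(log_with=MyCustomTracker("test_run"))
+    ```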
+ """
+
+ main_process_only = True
+
+ def __init__(self, _blank=False):
+ if not _blank:
+ err = ""
+ if not hasattr(self, "name"):
+ err += "`name`"
+ if not hasattr(self, "requires_logging_directory"):
+ if len(err) > 0:
+ err += ", "
+ err += "`requires_logging_directory`"
+
+ # as tracker is a @property that relies on post-init
+ if "tracker" not in dir(self):
+ if len(err) > 0:
+ err += ", "
+ err += "`tracker`"
+ if len(err) > 0:
+ raise NotImplementedError(
+ f"The implementation for this tracker class is missing the following "
+ f"required attributes. Please define them in the class definition: "
+ f"{err}"
+ )
+
+ def store_init_configuration(self, values: dict):
+ """
+ Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration
+ functionality of a tracking API.
+
+ Args:
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
+ `str`, `float`, `int`, or `None`.
+ """
+ pass
+
+ def log(self, values: dict, step: Optional[int], **kwargs):
+ """
+ Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with
+ special behavior for the `step` parameter.
+
+ Args:
+ values (Dictionary `str` to `str`, `float`, or `int`):
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ """
+ pass
+
+ def finish(self):
+ """
+ Should run any finalizing functions within the tracking API. If the API doesn't have one, just don't
+ overwrite this method.
+ """
+ pass
+
+
+class TensorBoardTracker(GeneralTracker):
+ """
+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.
+
+ Args:
+ run_name (`str`):
+ The name of the experiment run
+ logging_dir (`str`, `os.PathLike`):
+ Location for TensorBoard logs to be stored.
+ **kwargs (additional keyword arguments, *optional*):
+ Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method.
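+
+    Example (a minimal sketch; assumes `tensorboard` is installed and `./logs` is a writable placeholder directory):
+
+    ```py
+    from accelerate import Accelerator
+
+    accelerator = Accelerator(log_with="tensorboard", project_dir="./logs")
+    accelerator.init_trackers("my_run", config={"learning_rate": 1e-4})
+    accelerator.log({"train_loss": 0.42}, step=1)
+    accelerator.end_training()
+    ```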
+ """
+
+ name = "tensorboard"
+ requires_logging_directory = True
+
+ @on_main_process
+ def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
+ try:
+ from torch.utils import tensorboard
+ except ModuleNotFoundError:
+ import tensorboardX as tensorboard
+ super().__init__()
+ self.run_name = run_name
+ self.logging_dir = os.path.join(logging_dir, run_name)
+ self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)
+ logger.debug(f"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}")
+ logger.debug(
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
+ )
+
+ @property
+ def tracker(self):
+ return self.writer
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ """
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
+ hyperparameters in a yaml file for future use.
+
+ Args:
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
+ `str`, `float`, `int`, or `None`.
+ """
+ self.writer.add_hparams(values, metric_dict={})
+ self.writer.flush()
+ project_run_name = time.time()
+ dir_name = os.path.join(self.logging_dir, str(project_run_name))
+ os.makedirs(dir_name, exist_ok=True)
+ with open(os.path.join(dir_name, "hparams.yml"), "w") as outfile:
+ try:
+ yaml.dump(values, outfile)
+ except yaml.representer.RepresenterError:
+ logger.error("Serialization to store hyperparameters failed")
+ raise
+ logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file")
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
+ """
+ Logs `values` to the current run.
+
+ Args:
+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
+ `str` to `float`/`int`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to either `SummaryWriter.add_scalar`,
+ `SummaryWriter.add_text`, or `SummaryWriter.add_scalars`, based on the contents of `values`.
+ """
+ values = listify(values)
+ for k, v in values.items():
+ if isinstance(v, (int, float)):
+ self.writer.add_scalar(k, v, global_step=step, **kwargs)
+ elif isinstance(v, str):
+ self.writer.add_text(k, v, global_step=step, **kwargs)
+ elif isinstance(v, dict):
+ self.writer.add_scalars(k, v, global_step=step, **kwargs)
+ self.writer.flush()
+ logger.debug("Successfully logged to TensorBoard")
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int], **kwargs):
+ """
+ Logs `images` to the current run.
+
+ Args:
+ values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
+ Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to the `SummaryWriter.add_image` method.
+ """
+ for k, v in values.items():
+ self.writer.add_images(k, v, global_step=step, **kwargs)
+ logger.debug("Successfully logged images to TensorBoard")
+
+ @on_main_process
+ def finish(self):
+ """
+ Closes `TensorBoard` writer
+ """
+ self.writer.close()
+ logger.debug("TensorBoard writer closed")
+
+
+class WandBTracker(GeneralTracker):
+ """
+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.
+
+ Args:
+ run_name (`str`):
+ The name of the experiment run.
+ **kwargs (additional keyword arguments, *optional*):
+ Additional key word arguments passed along to the `wandb.init` method.
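+
+    Example (a minimal sketch; assumes `wandb` is installed and you are logged in; `"my-team"` is a placeholder entity):
+
+    ```py
+    from accelerate import Accelerator
+
+    accelerator = Accelerator(log_with="wandb")
+    accelerator.init_trackers("my_project", config={"epochs": 3}, init_kwargs={"wandb": {"entity": "my-team"}})
+    accelerator.log({"train_loss": 0.42}, step=1)
+    accelerator.end_training()
+    ```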
+ """
+
+ name = "wandb"
+ requires_logging_directory = False
+ main_process_only = False
+
+ @on_main_process
+ def __init__(self, run_name: str, **kwargs):
+ super().__init__()
+ self.run_name = run_name
+
+ import wandb
+
+ self.run = wandb.init(project=self.run_name, **kwargs)
+ logger.debug(f"Initialized WandB project {self.run_name}")
+ logger.debug(
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
+ )
+
+ @property
+ def tracker(self):
+ return self.run
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ """
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
+
+ Args:
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
+ `str`, `float`, `int`, or `None`.
+ """
+ import wandb
+
+ wandb.config.update(values, allow_val_change=True)
+ logger.debug("Stored initial configuration hyperparameters to WandB")
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
+ """
+ Logs `values` to the current run.
+
+ Args:
+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
+ `str` to `float`/`int`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to the `wandb.log` method.
+ """
+ self.run.log(values, step=step, **kwargs)
+ logger.debug("Successfully logged to WandB")
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
+ """
+ Logs `images` to the current run.
+
+ Args:
+ values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
+ Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to the `wandb.log` method.
+ """
+ import wandb
+
+ for k, v in values.items():
+ self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
+ logger.debug("Successfully logged images to WandB")
+
+ @on_main_process
+ def log_table(
+ self,
+ table_name: str,
+ columns: List[str] = None,
+ data: List[List[Any]] = None,
+ dataframe: Any = None,
+ step: Optional[int] = None,
+ **kwargs,
+ ):
+ """
+ Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either
+ with `columns` and `data` or with `dataframe`.
+
+ Args:
+ table_name (`str`):
+ The name to give to the logged table on the wandb workspace
+ columns (list of `str`, *optional*):
+ The name of the columns on the table
+ data (List of List of Any data type, *optional*):
+ The data to be logged in the table
+ dataframe (Any data type, *optional*):
+ The data to be logged in the table
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ """
+ import wandb
+
+ values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
+ self.log(values, step=step, **kwargs)
+
+ @on_main_process
+ def finish(self):
+ """
+ Closes `wandb` writer
+ """
+ self.run.finish()
+ logger.debug("WandB run closed")
+
+
+class CometMLTracker(GeneralTracker):
+ """
+ A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script.
+
+ API keys must be stored in a Comet config file.
+
+ Args:
+ run_name (`str`):
+ The name of the experiment run.
+ **kwargs (additional keyword arguments, *optional*):
+ Additional key word arguments passed along to the `Experiment.__init__` method.
+ """
+
+ name = "comet_ml"
+ requires_logging_directory = False
+
+ @on_main_process
+ def __init__(self, run_name: str, **kwargs):
+ super().__init__()
+ self.run_name = run_name
+
+ from comet_ml import Experiment
+
+ self.writer = Experiment(project_name=run_name, **kwargs)
+ logger.debug(f"Initialized CometML project {self.run_name}")
+ logger.debug(
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
+ )
+
+ @property
+ def tracker(self):
+ return self.writer
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ """
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
+
+ Args:
+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
+ `str`, `float`, `int`, or `None`.
+ """
+ self.writer.log_parameters(values)
+ logger.debug("Stored initial configuration hyperparameters to CometML")
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
+ """
+ Logs `values` to the current run.
+
+ Args:
+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
+ `str` to `float`/`int`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`,
+ or `Experiment.log_metrics` method based on the contents of `values`.
+ """
+ if step is not None:
+ self.writer.set_step(step)
+ for k, v in values.items():
+ if isinstance(v, (int, float)):
+ self.writer.log_metric(k, v, step=step, **kwargs)
+ elif isinstance(v, str):
+ self.writer.log_other(k, v, **kwargs)
+ elif isinstance(v, dict):
+ self.writer.log_metrics(v, step=step, **kwargs)
+ logger.debug("Successfully logged to CometML")
+
+ @on_main_process
+ def finish(self):
+ """
+ Closes `comet-ml` writer
+ """
+ self.writer.end()
+ logger.debug("CometML run closed")
+
+
+class AimTracker(GeneralTracker):
+ """
+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.
+
+ Args:
+ run_name (`str`):
+ The name of the experiment run.
+ **kwargs (additional keyword arguments, *optional*):
+ Additional key word arguments passed along to the `Run.__init__` method.
+ """
+
+ name = "aim"
+ requires_logging_directory = True
+
+ @on_main_process
+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
+ self.run_name = run_name
+
+ from aim import Run
+
+ self.writer = Run(repo=logging_dir, **kwargs)
+ self.writer.name = self.run_name
+ logger.debug(f"Initialized Aim project {self.run_name}")
+ logger.debug(
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
+ )
+
+ @property
+ def tracker(self):
+ return self.writer
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ """
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
+
+ Args:
+ values (`dict`):
+ Values to be stored as initial hyperparameters as key-value pairs.
+ """
+ self.writer["hparams"] = values
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int], **kwargs):
+ """
+ Logs `values` to the current run.
+
+ Args:
+ values (`dict`):
+ Values to be logged as key-value pairs.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to the `Run.track` method.
+ """
+ # Note: replace this with the dictionary support when merged
+ for key, value in values.items():
+ self.writer.track(value, name=key, step=step, **kwargs)
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None):
+ """
+ Logs `images` to the current run.
+
+ Args:
+ values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`):
+ Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a
+ tuple is provided, the first element should be the image and the second element should be the caption.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs (`Dict[str, dict]`):
+ Additional key word arguments passed along to the `Run.Image` and `Run.track` method specified by the
+ keys `aim_image` and `track`, respectively.
+ """
+ import aim
+
+ aim_image_kw = {}
+ track_kw = {}
+
+ if kwargs is not None:
+ aim_image_kw = kwargs.get("aim_image", {})
+ track_kw = kwargs.get("track", {})
+
+ for key, value in values.items():
+ if isinstance(value, tuple):
+ img, caption = value
+ else:
+ img, caption = value, ""
+ aim_image = aim.Image(img, caption=caption, **aim_image_kw)
+ self.writer.track(aim_image, name=key, step=step, **track_kw)
+
+ @on_main_process
+ def finish(self):
+ """
+ Closes `aim` writer
+ """
+ self.writer.close()
+
+
+class MLflowTracker(GeneralTracker):
+ """
+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.
+
+ Args:
+ experiment_name (`str`, *optional*):
+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.
+ logging_dir (`str` or `os.PathLike`, defaults to `"."`):
+ Location for mlflow logs to be stored.
+ run_id (`str`, *optional*):
+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s
+ end time is unset and its status is set to running, but the run’s other attributes (source_version,
+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.
+ tags (`Dict[str, str]`, *optional*):
+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a
+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are
+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.
+ nested_run (`bool`, *optional*, defaults to `False`):
+ Controls whether run is nested in parent run. True creates a nested run. Environment variable
+ MLFLOW_NESTED_RUN has priority over this argument.
+ run_name (`str`, *optional*):
+ Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.
+ description (`str`, *optional*):
+ An optional string that populates the description box of the run. If a run is being resumed, the
+ description is set on the resumed run. If a new run is being created, the description is set on the new
+ run.
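+
+    Example (a minimal sketch; assumes `mlflow` is installed; the experiment name is a placeholder and is set via the
+    environment variable documented above):
+
+    ```py
+    import os
+
+    from accelerate import Accelerator
+
+    os.environ["MLFLOW_EXPERIMENT_NAME"] = "my-experiment"  # takes priority over `experiment_name`
+    accelerator = Accelerator(log_with="mlflow")
+    accelerator.init_trackers("my-experiment", config={"batch_size": 32})
+    accelerator.log({"train_loss": 0.42}, step=1)
+    accelerator.end_training()
+    ```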
+ """
+
+ name = "mlflow"
+ requires_logging_directory = False
+
+ @on_main_process
+ def __init__(
+ self,
+ experiment_name: str = None,
+ logging_dir: Optional[Union[str, os.PathLike]] = None,
+ run_id: Optional[str] = None,
+ tags: Optional[Union[Dict[str, Any], str]] = None,
+ nested_run: Optional[bool] = False,
+ run_name: Optional[str] = None,
+ description: Optional[str] = None,
+ ):
+ experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME", experiment_name)
+ run_id = os.environ.get("MLFLOW_RUN_ID", run_id)
+ tags = os.environ.get("MLFLOW_TAGS", tags)
+ if isinstance(tags, str):
+ tags = json.loads(tags)
+
+ nested_run = os.environ.get("MLFLOW_NESTED_RUN", nested_run)
+
+ import mlflow
+
+ exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'")
+ if len(exps) > 0:
+ if len(exps) > 1:
+ logger.warning("Multiple experiments with the same name found. Using first one.")
+ experiment_id = exps[0].experiment_id
+ else:
+ experiment_id = mlflow.create_experiment(
+ name=experiment_name,
+ artifact_location=logging_dir,
+ tags=tags,
+ )
+
+ self.active_run = mlflow.start_run(
+ run_id=run_id,
+ experiment_id=experiment_id,
+ run_name=run_name,
+ nested=nested_run,
+ tags=tags,
+ description=description,
+ )
+
+ logger.debug(f"Initialized mlflow experiment {experiment_name}")
+ logger.debug(
+ "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
+ )
+
+ @property
+ def tracker(self):
+ return self.active_run
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ """
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
+
+ Args:
+ values (`dict`):
+ Values to be stored as initial hyperparameters as key-value pairs.
+ """
+ import mlflow
+
+ for name, value in list(values.items()):
+ # internally, all values are converted to str in MLflow
+ if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:
+ logger.warning_once(
+ f'Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s'
+ f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute."
+ )
+ del values[name]
+
+ values_list = list(values.items())
+
+ # MLflow cannot log more than 100 values in one go, so we have to split it
+ for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):
+ mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))
+
+ logger.debug("Stored initial configuration hyperparameters to MLflow")
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int]):
+ """
+ Logs `values` to the current run.
+
+ Args:
+ values (`dict`):
+ Values to be logged as key-value pairs.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ """
+ metrics = {}
+ for k, v in values.items():
+ if isinstance(v, (int, float)):
+ metrics[k] = v
+ else:
+ logger.warning_once(
+ f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. '
+ "MLflow's log_metric() only accepts float and int types so we dropped this attribute."
+ )
+ import mlflow
+
+ mlflow.log_metrics(metrics, step=step)
+ logger.debug("Successfully logged to mlflow")
+
+ @on_main_process
+ def finish(self):
+ """
+ End the active MLflow run.
+ """
+ import mlflow
+
+ mlflow.end_run()
+
+
+class ClearMLTracker(GeneralTracker):
+ """
+ A `Tracker` class that supports `clearml`. Should be initialized at the start of your script.
+
+ Args:
+ run_name (`str`, *optional*):
+ Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this
+ argument.
+ **kwargs (additional keyword arguments, *optional*):
+ Kwargs passed along to the `Task.__init__` method.
+ """
+
+ name = "clearml"
+ requires_logging_directory = False
+
+ @on_main_process
+ def __init__(self, run_name: str = None, **kwargs):
+ from clearml import Task
+
+ current_task = Task.current_task()
+ self._initialized_externally = False
+ if current_task:
+ self._initialized_externally = True
+ self.task = current_task
+ return
+
+ kwargs.setdefault("project_name", os.environ.get("CLEARML_PROJECT", run_name))
+ kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name))
+ self.task = Task.init(**kwargs)
+
+ @property
+ def tracker(self):
+ return self.task
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ """
+ Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment.
+
+ Args:
+ values (`dict`):
+ Values to be stored as initial hyperparameters as key-value pairs.
+ """
+ return self.task.connect_configuration(values)
+
+ @on_main_process
+ def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs):
+ """
+ Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be
+ ints or floats
+
+ Args:
+ values (`Dict[str, Union[int, float]]`):
+ Values to be logged as key-value pairs. If the key starts with 'eval_'/'test_'/'train_', the value will
+ be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed.
+ Otherwise, the value will be reported under the 'train' series, and no prefix will be removed.
+ step (`int`, *optional*):
+ If specified, the values will be reported as scalars, with the iteration number equal to `step`.
+ Otherwise they will be reported as single values.
+ kwargs:
+ Additional key word arguments passed along to the `clearml.Logger.report_single_value` or
+ `clearml.Logger.report_scalar` methods.
+ """
+ clearml_logger = self.task.get_logger()
+ for k, v in values.items():
+ if not isinstance(v, (int, float)):
+ logger.warning_once(
+ "Accelerator is attempting to log a value of "
+ f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
+ "This invocation of ClearML logger's report_scalar() "
+ "is incorrect so we dropped this attribute."
+ )
+ continue
+ if step is None:
+ clearml_logger.report_single_value(name=k, value=v, **kwargs)
+ continue
+ title, series = ClearMLTracker._get_title_series(k)
+ clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
+ """
+ Logs `images` to the current run.
+
+ Args:
+ values (`Dict[str, List[Union[np.ndarray, PIL.Image]]]`):
+ Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to the `clearml.Logger.report_image` method.
+ """
+ clearml_logger = self.task.get_logger()
+ for k, v in values.items():
+ title, series = ClearMLTracker._get_title_series(k)
+ clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)
+
+ @on_main_process
+ def log_table(
+ self,
+ table_name: str,
+ columns: List[str] = None,
+ data: List[List[Any]] = None,
+ dataframe: Any = None,
+ step: Optional[int] = None,
+ **kwargs,
+ ):
+ """
+ Log a Table to the task. Can be defined either with `columns` and `data` or with `dataframe`.
+
+ Args:
+ table_name (`str`):
+ The name of the table
+ columns (list of `str`, *optional*):
+ The name of the columns on the table
+ data (List of List of Any data type, *optional*):
+ The data to be logged in the table. If `columns` is not specified, then the first entry in data will be
+ the name of the columns of the table
+ dataframe (Any data type, *optional*):
+ The data to be logged in the table
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to the `clearml.Logger.report_table` method.
+ """
+ to_report = dataframe
+ if dataframe is None:
+ if data is None:
+ raise ValueError(
+ "`ClearMLTracker.log_table` requires that `data` to be supplied if `dataframe` is `None`"
+ )
+ to_report = [columns] + data if columns else data
+ title, series = ClearMLTracker._get_title_series(table_name)
+ self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)
+
+ @on_main_process
+ def finish(self):
+ """
+ Close the ClearML task. If the task was initialized externally (e.g. by manually calling `Task.init`), this
+ function is a noop
+ """
+ if self.task and not self._initialized_externally:
+ self.task.close()
+
+ @staticmethod
+ def _get_title_series(name):
+ for prefix in ["eval", "test", "train"]:
+ if name.startswith(prefix + "_"):
+ return name[len(prefix) + 1 :], prefix
+ return name, "train"
+
+
+class DVCLiveTracker(GeneralTracker):
+ """
+ A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script.
+
+ Args:
+ run_name (`str`, *optional*):
+ Ignored for dvclive. See `kwargs` instead.
+ kwargs:
+ Additional key word arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live).
+
+ Example:
+
+ ```py
+ from accelerate import Accelerator
+
+ accelerator = Accelerator(log_with="dvclive")
+ accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}})
+ ```
+ """
+
+ name = "dvclive"
+ requires_logging_directory = False
+
+ @on_main_process
+ def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs):
+ from dvclive import Live
+
+ super().__init__()
+ self.live = live if live is not None else Live(**kwargs)
+
+ @property
+ def tracker(self):
+ return self.live
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ """
+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
+ hyperparameters in a yaml file for future use.
+
+ Args:
+ values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types):
+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
+ `str`, `float`, or `int`.
+ """
+ self.live.log_params(values)
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int] = None, **kwargs):
+ """
+ Logs `values` to the current run.
+
+ Args:
+ values (Dictionary `str` to `str`, `float`, or `int`):
+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ kwargs:
+ Additional key word arguments passed along to `dvclive.Live.log_metric()`.
+ """
+ from dvclive.plots import Metric
+
+ if step is not None:
+ self.live.step = step
+ for k, v in values.items():
+ if Metric.could_log(v):
+ self.live.log_metric(k, v, **kwargs)
+ else:
+ logger.warning_once(
+ "Accelerator attempted to log a value of "
+ f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
+ "This invocation of DVCLive's Live.log_metric() "
+ "is incorrect so we dropped this attribute."
+ )
+ self.live.next_step()
+
+ @on_main_process
+ def finish(self):
+ """
+ Closes `dvclive.Live()`.
+ """
+ self.live.end()
+
+
+LOGGER_TYPE_TO_CLASS = {
+ "aim": AimTracker,
+ "comet_ml": CometMLTracker,
+ "mlflow": MLflowTracker,
+ "tensorboard": TensorBoardTracker,
+ "wandb": WandBTracker,
+ "clearml": ClearMLTracker,
+ "dvclive": DVCLiveTracker,
+}
+
+
+def filter_trackers(
+ log_with: List[Union[str, LoggerType, GeneralTracker]],
+ logging_dir: Union[str, os.PathLike] = None,
+):
+ """
+ Takes in a list of potential tracker types and:
+ - Checks that each requested tracker is available in the environment
+ - Filters out repeats of tracker types
+ - If `all` is in `log_with`, returns all trackers available in the environment
+ - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None`
+
+ Args:
+ log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
+ A list of loggers to be setup for experiment tracking. Should be one or several of:
+
+ - `"all"`
+ - `"tensorboard"`
+ - `"wandb"`
+ - `"comet_ml"`
+ - `"mlflow"`
+ - `"dvclive"`
+ If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
+ logging_dir (`str`, `os.PathLike`, *optional*):
+ A path to a directory for storing logs of locally-compatible loggers.
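+
+ Example (a minimal sketch; the returned list depends on which tracking packages are installed):
+
+ ```python
+ # Keeps only the requested trackers that are importable in the current environment;
+ # "tensorboard" requires `logging_dir` since it writes logs locally.
+ loggers = filter_trackers(["tensorboard", "wandb"], logging_dir="logs")
+ ```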
+ """
+ loggers = []
+ if log_with is not None:
+ if not isinstance(log_with, (list, tuple)):
+ log_with = [log_with]
+ if "all" in log_with or LoggerType.ALL in log_with:
+ loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()
+ else:
+ for log_type in log_with:
+ if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):
+ raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}")
+ if issubclass(type(log_type), GeneralTracker):
+ loggers.append(log_type)
+ else:
+ log_type = LoggerType(log_type)
+ if log_type not in loggers:
+ if log_type in get_available_trackers():
+ tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)]
+ if tracker_init.requires_logging_directory:
+ if logging_dir is None:
+ raise ValueError(
+ f"Logging with `{log_type}` requires a `logging_dir` to be passed in."
+ )
+ loggers.append(log_type)
+ else:
+ logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.")
+
+ return loggers
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/bnb.py b/venv/lib/python3.10/site-packages/accelerate/utils/bnb.py
new file mode 100644
index 0000000000000000000000000000000000000000..284ee5df6e89171948745255dd33a3b2b91123a2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/bnb.py
@@ -0,0 +1,467 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+import os
+from copy import deepcopy
+from typing import Dict, List, Optional, Union
+
+import torch
+import torch.nn as nn
+
+from accelerate.utils.imports import (
+ is_4bit_bnb_available,
+ is_8bit_bnb_available,
+)
+
+from ..big_modeling import dispatch_model, init_empty_weights
+from .dataclasses import BnbQuantizationConfig
+from .modeling import (
+ find_tied_parameters,
+ get_balanced_memory,
+ infer_auto_device_map,
+ load_checkpoint_in_model,
+ offload_weight,
+ set_module_tensor_to_device,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+def load_and_quantize_model(
+ model: torch.nn.Module,
+ bnb_quantization_config: BnbQuantizationConfig,
+ weights_location: Union[str, os.PathLike] = None,
+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
+ no_split_module_classes: Optional[List[str]] = None,
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
+ offload_state_dict: bool = False,
+):
+ """
+ This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the
+ model is on the meta device, we will load and dispatch the weights according to the `device_map` passed. If the
+ model is already loaded, we will quantize the model and put the model on the GPU.
+
+ Args:
+ model (`torch.nn.Module`):
+ Input model. The model can be already loaded or on the meta device
+ bnb_quantization_config (`BnbQuantizationConfig`):
+ The bitsandbytes quantization parameters
+ weights_location (`str` or `os.PathLike`):
+ The location of the model weights to load. It can be:
+ - a path to a file containing a whole model state dict
+ - a path to a `.json` file containing the index to a sharded checkpoint
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+ - a path to a folder containing a unique pytorch_model.bin file.
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+ name; once a given module name is inside, every submodule of it will be sent to the same device.
+ no_split_module_classes (`List[str]`, *optional*):
+ A list of layer class names that should never be split across devices (for instance any layer that has a
+ residual connection).
+ max_memory (`Dict`, *optional*):
+ A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available if unset.
+ offload_folder (`str` or `os.PathLike`, *optional*):
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+ offload_state_dict (`bool`, *optional*, defaults to `False`):
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
+ the weight of the CPU state dict + the biggest shard does not fit.
+
+ Returns:
+ `torch.nn.Module`: The quantized model
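+
+ Example (a minimal sketch; `MyModel` and the checkpoint path are placeholders, and a CUDA GPU with a recent
+ `bitsandbytes` install is assumed):
+
+ ```python
+ from accelerate import init_empty_weights
+ from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
+
+ with init_empty_weights():
+     empty_model = MyModel()  # hypothetical model class, instantiated on the meta device
+
+ bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
+ quantized_model = load_and_quantize_model(
+     empty_model,
+     bnb_quantization_config=bnb_config,
+     weights_location="path/to/checkpoint",
+     device_map="auto",
+ )
+ ```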
+ """
+
+ load_in_4bit = bnb_quantization_config.load_in_4bit
+ load_in_8bit = bnb_quantization_config.load_in_8bit
+
+ if load_in_8bit and not is_8bit_bnb_available():
+ raise ImportError(
+ "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
+ " make sure you have the latest version of `bitsandbytes` installed."
+ )
+ if load_in_4bit and not is_4bit_bnb_available():
+ raise ValueError(
+ "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
+ "make sure you have the latest version of `bitsandbytes` installed."
+ )
+
+ modules_on_cpu = []
+ # custom device map
+ if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+ modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
+ if bnb_quantization_config.skip_modules is None:
+ bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
+
+ # add cpu modules to skip modules only for 4-bit modules
+ if load_in_4bit:
+ bnb_quantization_config.skip_modules.extend(modules_on_cpu)
+ modules_to_not_convert = bnb_quantization_config.skip_modules
+
+ # We add the modules we want to keep in full precision
+ if bnb_quantization_config.keep_in_fp32_modules is None:
+ bnb_quantization_config.keep_in_fp32_modules = []
+ keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
+ modules_to_not_convert.extend(keep_in_fp32_modules)
+
+ # compatibility with peft
+ model.is_loaded_in_4bit = load_in_4bit
+ model.is_loaded_in_8bit = load_in_8bit
+
+ model_device = get_parameter_device(model)
+ if model_device.type != "meta":
+ # quantization of an already loaded model
+ logger.warning(
+ "It is not recommended to quantize a loaded model. "
+ "The model should be instantiated under the `init_empty_weights` context manager."
+ )
+ model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
+ # convert param to the right dtype
+ dtype = bnb_quantization_config.torch_dtype
+ for name, param in model.state_dict().items():
+ if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
+ param.to(torch.float32)
+ if param.dtype != torch.float32:
+ name = name.replace(".weight", "").replace(".bias", "")
+ param = getattr(model, name, None)
+ if param is not None:
+ param.to(torch.float32)
+ elif torch.is_floating_point(param):
+ param.to(dtype)
+ if model_device.type == "cuda":
+ # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
+ model.cuda(torch.cuda.current_device())
+ torch.cuda.empty_cache()
+ elif torch.cuda.is_available():
+ model.to(torch.cuda.current_device())
+ else:
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+ logger.info(
+ f"The model device type is {model_device.type}. However, cuda is needed for quantization."
+ "We move the model to cuda."
+ )
+ return model
+
+ elif weights_location is None:
+ raise RuntimeError(
+ f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
+ )
+
+ else:
+ with init_empty_weights():
+ model = replace_with_bnb_layers(
+ model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
+ )
+
+ device_map = get_quantized_model_device_map(
+ model,
+ bnb_quantization_config,
+ device_map,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ )
+ if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
+ offload_state_dict = True
+
+ offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
+
+ load_checkpoint_in_model(
+ model,
+ weights_location,
+ device_map,
+ dtype=bnb_quantization_config.torch_dtype,
+ offload_folder=offload_folder,
+ offload_state_dict=offload_state_dict,
+ keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
+ offload_8bit_bnb=load_in_8bit and offload,
+ )
+ return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
+
+
+def get_quantized_model_device_map(
+ model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
+):
+ if device_map is None:
+ if torch.cuda.is_available():
+ device_map = {"": torch.cuda.current_device()}
+ else:
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+ logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
+
+ if isinstance(device_map, str):
+ if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
+ raise ValueError(
+ "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
+ "'sequential'."
+ )
+
+ special_dtypes = {}
+ special_dtypes.update(
+ {
+ name: bnb_quantization_config.torch_dtype
+ for name, _ in model.named_parameters()
+ if any(m in name for m in bnb_quantization_config.skip_modules)
+ }
+ )
+ special_dtypes.update(
+ {
+ name: torch.float32
+ for name, _ in model.named_parameters()
+ if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
+ }
+ )
+
+ kwargs = {}
+ kwargs["special_dtypes"] = special_dtypes
+ kwargs["no_split_module_classes"] = no_split_module_classes
+ kwargs["dtype"] = bnb_quantization_config.target_dtype
+
+ # get max_memory for each device.
+ if device_map != "sequential":
+ max_memory = get_balanced_memory(
+ model,
+ low_zero=(device_map == "balanced_low_0"),
+ max_memory=max_memory,
+ **kwargs,
+ )
+
+ kwargs["max_memory"] = max_memory
+ device_map = infer_auto_device_map(model, **kwargs)
+
+ if isinstance(device_map, dict):
+ # check that we don't have any quantized module on the cpu
+ modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
+
+ device_map_without_some_modules = {
+ key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
+ }
+ for device in ["cpu", "disk"]:
+ if device in device_map_without_some_modules.values():
+ if bnb_quantization_config.load_in_4bit:
+ raise ValueError(
+ """
+ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
+ the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
+ these modules in `torch_dtype`, you need to pass a custom `device_map` to
+ `load_and_quantize_model`. Check
+ https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
+ for more details.
+ """
+ )
+ else:
+ logger.info(
+ "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
+ )
+ del device_map_without_some_modules
+ return device_map
+
+
+def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
+ """
+ A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` or `bnb.nn.Linear4bit`
+ modules from the `bitsandbytes` library. The function will be run recursively and replace all `torch.nn.Linear` modules.
+
+ Parameters:
+ model (`torch.nn.Module`):
+ Input model or `torch.nn.Module` as the function is run recursively.
+ modules_to_not_convert (`List[str]`):
+ Names of the modules to not convert. In practice we keep the `lm_head` in full precision for
+ numerical stability reasons.
+ current_key_name (`List[str]`, *optional*):
+ An array to track the current key of the recursion. This is used to check whether the current key (part of
+ it) is not in the list of modules to not convert.
+ """
+
+ if modules_to_not_convert is None:
+ modules_to_not_convert = []
+
+ model, has_been_replaced = _replace_with_bnb_layers(
+ model, bnb_quantization_config, modules_to_not_convert, current_key_name
+ )
+ if not has_been_replaced:
+ logger.warning(
+ "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
+ " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
+ " Please double check your model architecture, or submit an issue on github if you think this is"
+ " a bug."
+ )
+ return model
+
+
+def _replace_with_bnb_layers(
+ model,
+ bnb_quantization_config,
+ modules_to_not_convert=None,
+ current_key_name=None,
+):
+ """
+ Private method that wraps the recursion for module replacement.
+
+ Returns the converted model and a boolean that indicates if the conversion has been successful or not.
+ """
+ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
+ import bitsandbytes as bnb
+
+ has_been_replaced = False
+ for name, module in model.named_children():
+ if current_key_name is None:
+ current_key_name = []
+ current_key_name.append(name)
+ if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
+ # Check if the current key is not in the `modules_to_not_convert`
+ current_key_name_str = ".".join(current_key_name)
+ proceed = True
+ for key in modules_to_not_convert:
+ if (
+ (key in current_key_name_str) and (key + "." in current_key_name_str)
+ ) or key == current_key_name_str:
+ proceed = False
+ break
+ if proceed:
+ # Load bnb module with empty weight and replace the `nn.Linear` module
+ if bnb_quantization_config.load_in_8bit:
+ bnb_module = bnb.nn.Linear8bitLt(
+ module.in_features,
+ module.out_features,
+ module.bias is not None,
+ has_fp16_weights=False,
+ threshold=bnb_quantization_config.llm_int8_threshold,
+ )
+ elif bnb_quantization_config.load_in_4bit:
+ bnb_module = bnb.nn.Linear4bit(
+ module.in_features,
+ module.out_features,
+ module.bias is not None,
+ bnb_quantization_config.bnb_4bit_compute_dtype,
+ compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
+ quant_type=bnb_quantization_config.bnb_4bit_quant_type,
+ )
+ else:
+ raise ValueError("load_in_8bit and load_in_4bit can't be both False")
+ bnb_module.weight.data = module.weight.data
+ if module.bias is not None:
+ bnb_module.bias.data = module.bias.data
+ bnb_module.requires_grad_(False)
+ setattr(model, name, bnb_module)
+ has_been_replaced = True
+ if len(list(module.children())) > 0:
+ _, _has_been_replaced = _replace_with_bnb_layers(
+ module, bnb_quantization_config, modules_to_not_convert, current_key_name
+ )
+ has_been_replaced = has_been_replaced | _has_been_replaced
+ # Remove the last key for recursion
+ current_key_name.pop(-1)
+ return model, has_been_replaced
+
+
+def get_keys_to_not_convert(model):
+ r"""
+ A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM
+ modules we may want to keep the `lm_head` in full precision for numerical stability reasons. For other
+ architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the
+ modules to not convert in int8.
+
+ Parameters:
+ model (`torch.nn.Module`):
+ Input model
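+
+ Example (illustrative; the exact keys depend on the architecture, and `model` is assumed to be an already
+ instantiated `transformers` model):
+
+ ```python
+ # For a typical CausalLM with an output head tied to the embeddings, this usually
+ # returns something like ["lm_head"].
+ keys_to_skip = get_keys_to_not_convert(model)
+ ```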
+ """
+ # Create a copy of the model
+ with init_empty_weights():
+ tied_model = deepcopy(model) # this has 0 cost since it is done inside the `init_empty_weights` context manager
+
+ tied_params = find_tied_parameters(tied_model)
+ # For compatibility with Accelerate < 0.18
+ if isinstance(tied_params, dict):
+ tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
+ else:
+ tied_keys = sum(tied_params, [])
+ has_tied_params = len(tied_keys) > 0
+
+ # Check if it is a base model
+ is_base_model = False
+ if hasattr(model, "base_model_prefix"):
+ is_base_model = not hasattr(model, model.base_model_prefix)
+
+ # Ignore this for base models (BertModel, GPT2Model, etc.)
+ if (not has_tied_params) and is_base_model:
+ return []
+
+ # otherwise they have an attached head
+ list_modules = list(model.named_children())
+ list_last_module = [list_modules[-1][0]]
+
+ # add last module together with tied weights
+ intersection = set(list_last_module) - set(tied_keys)
+ list_untouched = list(set(tied_keys)) + list(intersection)
+
+ # remove ".weight" from the keys
+ names_to_remove = [".weight", ".bias"]
+ filtered_module_names = []
+ for name in list_untouched:
+ for name_to_remove in names_to_remove:
+ if name_to_remove in name:
+ name = name.replace(name_to_remove, "")
+ filtered_module_names.append(name)
+
+ return filtered_module_names
+
+
+def has_4bit_bnb_layers(model):
+ """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model"""
+ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
+ import bitsandbytes as bnb
+
+ for m in model.modules():
+ if isinstance(m, bnb.nn.Linear4bit):
+ return True
+ return False
+
+
+def get_parameter_device(parameter: nn.Module):
+ return next(parameter.parameters()).device
+
+
+def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
+ # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
+ if fp16_statistics is None:
+ set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
+ tensor_name = param_name
+ module = model
+ if "." in tensor_name:
+ splits = tensor_name.split(".")
+ for split in splits[:-1]:
+ new_module = getattr(module, split)
+ if new_module is None:
+ raise ValueError(f"{module} has no attribute {split}.")
+ module = new_module
+ tensor_name = splits[-1]
+ # offload weights
+ module._parameters[tensor_name].requires_grad = False
+ offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
+ if hasattr(module._parameters[tensor_name], "SCB"):
+ offload_weight(
+ module._parameters[tensor_name].SCB,
+ param_name.replace("weight", "SCB"),
+ offload_folder,
+ index=offload_index,
+ )
+ else:
+ offload_weight(param, param_name, offload_folder, index=offload_index)
+ offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
+
+ set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/dataclasses.py b/venv/lib/python3.10/site-packages/accelerate/utils/dataclasses.py
new file mode 100644
index 0000000000000000000000000000000000000000..63e8a3a49f7c765214d53c30a279097671c8838e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/dataclasses.py
@@ -0,0 +1,1717 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+General namespace and dataclass related classes
+"""
+
+import argparse
+import copy
+import enum
+import functools
+import os
+import typing
+import warnings
+from contextlib import contextmanager
+from dataclasses import dataclass, field
+from datetime import timedelta
+from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, get_args
+
+import torch
+
+from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE
+from .environment import str_to_bool
+from .imports import is_cuda_available, is_npu_available, is_xpu_available
+from .versions import compare_versions
+
+
+class KwargsHandler:
+ """
+ Internal mixin that implements a `to_kwargs()` method for a dataclass.
+ """
+
+ def to_dict(self):
+ return copy.deepcopy(self.__dict__)
+
+ def to_kwargs(self):
+ """
+ Returns a dictionary containing the attributes with values different from the default of this class.
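+
+ Example (illustrative sketch, using `GradScalerKwargs` defined later in this module):
+
+ ```python
+ # Only `init_scale` differs from its default value, so only that key is returned.
+ GradScalerKwargs(init_scale=1024.0).to_kwargs()  # {"init_scale": 1024.0}
+ ```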
+ """
+ # import clear_environment here to avoid circular import problem
+ from .other import clear_environment
+
+ with clear_environment():
+ default_dict = self.__class__().to_dict()
+ this_dict = self.to_dict()
+ return {k: v for k, v in this_dict.items() if default_dict[k] != v}
+
+
+@dataclass
+class AutocastKwargs(KwargsHandler):
+ """
+ Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the
+ documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more
+ information on each argument.
+
+ Example:
+
+ ```python
+ from accelerate import Accelerator
+ from accelerate.utils import AutocastKwargs
+
+ kwargs = AutocastKwargs(cache_enabled=True)
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
+ ```
+ """
+
+ enabled: bool = True
+ cache_enabled: bool = None
+
+
+@dataclass
+class DistributedDataParallelKwargs(KwargsHandler):
+ """
+ Use this object in your [`Accelerator`] to customize how your model is wrapped in a
+ `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
+ [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
+ information on each argument.
+
+
+
+ `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
+
+ `static_graph` is only available in PyTorch 1.11.0 and later versions.
+
+
+
+ Example:
+
+ ```python
+ from accelerate import Accelerator
+ from accelerate.utils import DistributedDataParallelKwargs
+
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
+ ```
+ """
+
+ dim: int = 0
+ broadcast_buffers: bool = True
+ bucket_cap_mb: int = 25
+ find_unused_parameters: bool = False
+ check_reduction: bool = False
+ gradient_as_bucket_view: bool = False
+ static_graph: bool = False
+
+
+@dataclass
+class GradScalerKwargs(KwargsHandler):
+ """
+ Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
+ `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
+ [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
+
+
+
+ `GradScaler` is only available in PyTorch 1.5.0 and later versions.
+
+
+
+ Example:
+
+ ```python
+ from accelerate import Accelerator
+ from accelerate.utils import GradScalerKwargs
+
+ kwargs = GradScalerKwargs(backoff_factor=0.25)
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
+ ```
+ """
+
+ init_scale: float = 65536.0
+ growth_factor: float = 2.0
+ backoff_factor: float = 0.5
+ growth_interval: int = 2000
+ enabled: bool = True
+
+
+@dataclass
+class InitProcessGroupKwargs(KwargsHandler):
+ """
+ Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer
+ to the documentation of this
+ [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
+ information on each argument.
+
+ ```python
+ from datetime import timedelta
+ from accelerate import Accelerator
+ from accelerate.utils import InitProcessGroupKwargs
+
+ kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
+ ```
+ """
+
+ backend: Optional[str] = "nccl"
+ init_method: Optional[str] = None
+ timeout: timedelta = timedelta(seconds=1800)
+
+
+# Literals
+Backend = Literal["MSAMP", "TE"]
+OptLevel = Literal["O1", "O2"]
+FP8Format = Literal["E4M3", "HYBRID"]
+AmaxComputeAlgorithm = Literal["max", "most_recent"]
+
+
+@dataclass
+class FP8RecipeKwargs(KwargsHandler):
+ """
+ Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
+ training with `transformer-engine` or `ms-amp`.
+
+
+
+ For more information on `transformer-engine` args, please refer to the API
+ [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html).
+
+ For more information on the `ms-amp` args, please refer to the Optimization Level
+ [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level).
+
+
+
+ ```python
+ from accelerate import Accelerator
+ from accelerate.utils import FP8RecipeKwargs
+
+ kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID")
+ accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
+ ```
+
+ To use MS-AMP as an engine, pass `backend="msamp"` and the `opt_level`:
+
+ ```python
+ kwargs = FP8RecipeKwargs(backend="msamp", optimization_level="02")
+ ```
+
+ Args:
+ backend (`str`, *optional*, defaults to "msamp"):
+ Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine).
+ margin (`int`, *optional*, default to 0):
+ The margin to use for the gradient scaling.
+ interval (`int`, *optional*, default to 1):
+ The interval to use for how often the scaling factor is recomputed.
+ fp8_format (`str`, *optional*, default to "E4M3"):
+ The format to use for the FP8 recipe. Must be one of `E4M3` or `HYBRID`.
+ amax_history_len (`int`, *optional*, default to 1024):
+ The length of the history to use for the scaling factor computation
+ amax_compute_algo (`str`, *optional*, default to "most_recent"):
+ The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
+ override_linear_precision (`tuple` of three `bool`, *optional*, default to `(False, False, False)`):
+ Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
+ optimization_level (`str`), one of `O1`, `O2`. (default is `O2`):
+ What level of 8-bit collective communication should be used with MS-AMP. In general:
+ * O1: Weight gradients and `all_reduce` communications are done in fp8, reducing GPU
+ memory usage and communication bandwidth
+ * O2: First-order optimizer states are in 8-bit, and second order states are in FP16.
+ Only available when using Adam or AdamW. This maintains accuracy and can potentially save the
+ highest memory.
+ * 03: Specifically for DeepSpeed, implements capabilities so weights and master weights of models
+ are stored in FP8. If `fp8` is selected and deepspeed is enabled, will be used by default. (Not
+ available currently).
+ """
+
+ backend: Backend = "MSAMP"
+ opt_level: OptLevel = "O2"
+ margin: int = 0
+ interval: int = 1
+ fp8_format: FP8Format = "E4M3"
+ amax_history_len: int = 1
+ amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
+ override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
+
+ def __post_init__(self):
+ if self.backend.upper() not in get_args(Backend):
+ raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine).")
+
+ self.backend = self.backend.upper()
+ # Check TE args
+ if self.backend == "TE":
+ self.fp8_format = self.fp8_format.upper()
+ if self.fp8_format not in get_args(FP8Format):
+ raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.")
+ if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm):
+ raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}")
+ elif self.backend == "MSAMP":
+ if self.opt_level not in get_args(OptLevel):
+ raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
+
+
+class EnumWithContains(enum.EnumMeta):
+ "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
+
+ def __contains__(cls, item):
+ try:
+ cls(item)
+ except ValueError:
+ return False
+ return True
+
+
+class BaseEnum(enum.Enum, metaclass=EnumWithContains):
+ "An enum class that can get the value of an item with `str(Enum.key)`"
+
+ def __str__(self):
+ return self.value
+
+ @classmethod
+ def list(cls):
+ "Method to list all the possible items in `cls`"
+ return list(map(str, cls))
+
+
+class DeprecatedFieldDescriptor:
+ """
+ Descriptor for deprecated fields in an enum class.
+
+ Args:
+ field_name (`str`):
+ The name of the deprecated field.
+ replaced_with (`str`):
+ The name of the field that replaces the deprecated one.
+ """
+
+ def __init__(self, field_name, replaced_with):
+ self.field_name = field_name
+ self.replaced_with = replaced_with
+
+ def __get__(self, instance, owner):
+ warnings.warn(
+ f"The `{self.field_name}` of `{owner}` is deprecated and will be removed in v1.0.0. "
+ f"Please use the `{self.replaced_with}` instead.",
+ FutureWarning,
+ )
+ return getattr(owner, self.replaced_with)
+
+
+class DistributedType(str, enum.Enum):
+ """
+ Represents a type of distributed environment.
+
+ Values:
+
+ - **NO** -- Not a distributed environment, just a single process.
+ - **MULTI_CPU** -- Distributed on multiple CPU nodes.
+ - **MULTI_GPU** -- Distributed on multiple GPUs.
+ - **MULTI_MLU** -- Distributed on multiple MLUs.
+ - **MULTI_NPU** -- Distributed on multiple NPUs.
+ - **MULTI_XPU** -- Distributed on multiple XPUs.
+ - **DEEPSPEED** -- Using DeepSpeed.
+ - **XLA** -- Using TorchXLA.
+ - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead.
+ """
+
+ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.
+ NO = "NO"
+ MULTI_CPU = "MULTI_CPU"
+ MULTI_GPU = "MULTI_GPU"
+ MULTI_NPU = "MULTI_NPU"
+ MULTI_MLU = "MULTI_MLU"
+ MULTI_XPU = "MULTI_XPU"
+ DEEPSPEED = "DEEPSPEED"
+ FSDP = "FSDP"
+ XLA = "XLA"
+ MEGATRON_LM = "MEGATRON_LM"
+ TPU = DeprecatedFieldDescriptor("TPU", "XLA")
+
+
+class SageMakerDistributedType(str, enum.Enum):
+ """
+ Represents a type of distributed environment.
+
+ Values:
+
+ - **NO** -- Not a distributed environment, just a single process.
+ - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
+ - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
+ """
+
+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
+ NO = "NO"
+ DATA_PARALLEL = "DATA_PARALLEL"
+ MODEL_PARALLEL = "MODEL_PARALLEL"
+
+
+class ComputeEnvironment(str, enum.Enum):
+ """
+ Represents a type of the compute environment.
+
+ Values:
+
+ - **LOCAL_MACHINE** -- private/custom cluster hardware.
+ - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
+ """
+
+ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
+ LOCAL_MACHINE = "LOCAL_MACHINE"
+ AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
+
+
+class DynamoBackend(str, BaseEnum):
+ """
+ Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html).
+
+ Values:
+
+ - **NO** -- Do not use torch dynamo.
+ - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
+ issues.
+ - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's
+ extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.
+ - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
+ kernels. [Read
+ more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
+ - **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
+ - **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
+ - **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
+ - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
+ more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
+ - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
+ more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
+ - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
+ - **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
+ more](https://github.com/onnx/onnx-tensorrt)
+ - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
+ more](https://github.com/intel/intel-extension-for-pytorch).
+ - **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
+
+ """
+
+ # Subclassing str as well as Enum allows the `DynamoBackend` to be JSON-serializable out of the box.
+ NO = "NO"
+ EAGER = "EAGER"
+ AOT_EAGER = "AOT_EAGER"
+ INDUCTOR = "INDUCTOR"
+ AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
+ NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
+ CUDAGRAPHS = "CUDAGRAPHS"
+ OFI = "OFI"
+ FX2TRT = "FX2TRT"
+ ONNXRT = "ONNXRT"
+ TENSORRT = "TENSORRT"
+ IPEX = "IPEX"
+ TVM = "TVM"
+
+
+class LoggerType(BaseEnum):
+ """Represents a type of supported experiment tracker
+
+ Values:
+
+ - **ALL** -- all available trackers in the environment that are supported
+ - **AIM** -- aim as an experiment tracker
+ - **TENSORBOARD** -- TensorBoard as an experiment tracker
+ - **WANDB** -- wandb as an experiment tracker
+ - **COMETML** -- comet_ml as an experiment tracker
+ - **MLFLOW** -- mlflow as an experiment tracker
+ - **CLEARML** -- clearml as an experiment tracker
+ - **DVCLIVE** -- dvclive as an experiment tracker
+ """
+
+ ALL = "all"
+ AIM = "aim"
+ TENSORBOARD = "tensorboard"
+ WANDB = "wandb"
+ COMETML = "comet_ml"
+ MLFLOW = "mlflow"
+ CLEARML = "clearml"
+ DVCLIVE = "dvclive"
+
+
+class PrecisionType(BaseEnum):
+ """Represents a type of precision used on floating point values
+
+ Values:
+
+ - **NO** -- using full precision (FP32)
+ - **FP8** -- using 8-bit floating point precision
+ - **FP16** -- using half precision
+ - **BF16** -- using brain floating point precision
+ """
+
+ NO = "no"
+ FP8 = "fp8"
+ FP16 = "fp16"
+ BF16 = "bf16"
+
+
+class RNGType(BaseEnum):
+ TORCH = "torch"
+ CUDA = "cuda"
+ MLU = "mlu"
+ NPU = "npu"
+ XLA = "xla"
+ XPU = "xpu"
+ GENERATOR = "generator"
+
+
+class CustomDtype(enum.Enum):
+ r"""
+ An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
+ """
+
+ FP8 = "fp8"
+ INT4 = "int4"
+ INT2 = "int2"
+
+
+# data classes
+
+
+@dataclass
+class TensorInformation:
+ shape: torch.Size
+ dtype: torch.dtype
+
+
+@dataclass
+class DataLoaderConfiguration:
+ """
+ Configuration for dataloader-related items when calling `accelerator.prepare`.
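+
+ Example (a minimal sketch; assumes an `accelerate` version whose `Accelerator` accepts `dataloader_config`):
+
+ ```python
+ from accelerate import Accelerator
+ from accelerate.utils import DataLoaderConfiguration
+
+ dataloader_config = DataLoaderConfiguration(split_batches=True, even_batches=False)
+ accelerator = Accelerator(dataloader_config=dataloader_config)
+ ```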
+ """
+
+ split_batches: bool = field(
+ default=False,
+ metadata={
+ "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
+ " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a"
+ " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set"
+ " in your script multiplied by the number of processes."
+ },
+ )
+ dispatch_batches: bool = field(
+ default=None,
+ metadata={
+ "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
+ " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose"
+ " underlying dataset is an `IterableDataslet`, `False` otherwise."
+ },
+ )
+ even_batches: bool = field(
+ default=True,
+ metadata={
+ "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
+ " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
+ " all workers."
+ },
+ )
+ use_seedable_sampler: bool = field(
+ default=False,
+ metadata={
+ "help": "Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])."
+ "Ensures training results are fully reproducable using a different sampling technique. "
+ "While seed-to-seed results may differ, on average the differences are neglible when using"
+ "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results."
+ },
+ )
+
+
+@dataclass
+class ProjectConfiguration:
+ """
+ Configuration for the Accelerator object based on inner-project needs.
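+
+ Example (a minimal sketch):
+
+ ```python
+ from accelerate import Accelerator
+ from accelerate.utils import ProjectConfiguration
+
+ config = ProjectConfiguration(project_dir="my_project", automatic_checkpoint_naming=True)
+ accelerator = Accelerator(project_config=config)
+ ```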
+ """
+
+ project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."})
+ logging_dir: str = field(
+ default=None,
+ metadata={
+ "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`."
+ },
+ )
+ automatic_checkpoint_naming: bool = field(
+ default=False,
+ metadata={"help": "Whether saved states should be automatically iteratively named."},
+ )
+
+ total_limit: int = field(
+ default=None,
+ metadata={"help": "The maximum number of total saved states to keep."},
+ )
+
+ iteration: int = field(
+ default=0,
+ metadata={"help": "The current save iteration."},
+ )
+
+ save_on_each_node: bool = field(
+ default=False,
+ metadata={
+ "help": (
+ "When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
+ " only on the main one"
+ )
+ },
+ )
+
+ def set_directories(self, project_dir: str = None):
+ "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
+ self.project_dir = project_dir
+ if self.logging_dir is None:
+ self.logging_dir = project_dir
+
+ def __post_init__(self):
+ self.set_directories(self.project_dir)
+
+
+@dataclass
+class GradientAccumulationPlugin(KwargsHandler):
+ """
+ A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or
+ `gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error.
+
+ Parameters:
+ num_steps (`int`):
+ The number of steps to accumulate gradients for.
+ adjust_scheduler (`bool`, *optional*, defaults to `True`):
+ Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be
+ `True` if the used scheduler was not adjusted for gradient accumulation.
+ sync_with_dataloader (`bool`, *optional*, defaults to `True`):
+ Whether to synchronize setting the gradients when at the end of the dataloader.
+ sync_each_batch (`bool`, *optional*):
+ Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory
+ requirements when using gradient accumulation with distributed training, at the expense of speed.
+
+ Example:
+
+ ```python
+ from accelerate.utils import GradientAccumulationPlugin
+
+ gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
+ ```
+ """
+
+ num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."})
+ adjust_scheduler: bool = field(
+ default=True,
+ metadata={
+ "help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation."
+ },
+ )
+ sync_with_dataloader: bool = field(
+ default=True,
+ metadata={
+ "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
+ },
+ )
+ sync_each_batch: bool = field(
+ default=False,
+ metadata={
+ "help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed."
+ },
+ )
+
+
+@dataclass
+class TorchDynamoPlugin(KwargsHandler):
+ """
+ This plugin is used to compile a model with PyTorch 2.0.
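+
+ Example (illustrative sketch; only shows how the plugin resolves its fields):
+
+ ```python
+ from accelerate.utils import TorchDynamoPlugin
+
+ plugin = TorchDynamoPlugin(backend="inductor", mode="max-autotune")
+ print(plugin.to_dict())  # e.g. {'backend': 'inductor', 'mode': 'max-autotune', ...}
+ ```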
+ """
+
+ backend: DynamoBackend = field(
+ default=None,
+ metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
+ )
+ mode: str = field(
+ default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"}
+ )
+ fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"})
+ dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
+ options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
+ disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
+
+ def __post_init__(self):
+ prefix = "ACCELERATE_DYNAMO_"
+ if self.backend is None:
+ self.backend = os.environ.get(prefix + "BACKEND", "no")
+ self.backend = DynamoBackend(self.backend.upper())
+ if self.mode is None:
+ self.mode = os.environ.get(prefix + "MODE", "default")
+ if self.fullgraph is None:
+ self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
+ if self.dynamic is None:
+ self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
+
+ def to_dict(self):
+ dynamo_config = copy.deepcopy(self.__dict__)
+ dynamo_config["backend"] = dynamo_config["backend"].value.lower()
+ return dynamo_config
+
+
+@dataclass
+class DeepSpeedPlugin:
+ """
+ This plugin is used to integrate DeepSpeed.
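+
+ Example (a minimal sketch; assumes DeepSpeed is installed and the script is launched in a distributed setup,
+ e.g. via `accelerate launch`):
+
+ ```python
+ from accelerate import Accelerator, DeepSpeedPlugin
+
+ deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
+ ```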
+ """
+
+ hf_ds_config: Any = field(
+ default=None,
+ metadata={
+ "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`."
+ },
+ )
+ gradient_accumulation_steps: int = field(
+ default=None,
+ metadata={
+ "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
+ },
+ )
+ gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
+ zero_stage: int = field(
+ default=None,
+ metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"},
+ )
+ is_train_batch_min: str = field(
+ default=True,
+ metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"},
+ )
+ offload_optimizer_device: bool = field(
+ default=None,
+ metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."},
+ )
+ offload_param_device: bool = field(
+ default=None,
+ metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."},
+ )
+ offload_optimizer_nvme_path: str = field(
+ default=None,
+ metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
+ )
+ offload_param_nvme_path: str = field(
+ default=None,
+ metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
+ )
+ zero3_init_flag: bool = field(
+ default=None,
+ metadata={
+ "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models."
+ "Only applicable with ZeRO Stage-3."
+ },
+ )
+ zero3_save_16bit_model: bool = field(
+ default=None,
+ metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
+ )
+
+ def __post_init__(self):
+ from .deepspeed import HfDeepSpeedConfig
+
+ if self.gradient_accumulation_steps is None:
+ gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
+ self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
+
+ if self.gradient_clipping is None:
+ gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
+ if gradient_clipping != "none":
+ self.gradient_clipping = float(gradient_clipping)
+
+ if self.zero_stage is None:
+ self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
+
+ if self.offload_optimizer_device is None:
+ self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
+
+ if self.offload_param_device is None:
+ self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
+
+ if self.offload_optimizer_nvme_path is None:
+ self.offload_optimizer_nvme_path = os.environ.get(
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
+ )
+
+ if self.offload_param_nvme_path is None:
+ self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")
+
+ if self.zero3_save_16bit_model is None:
+ self.zero3_save_16bit_model = (
+ os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
+ )
+
+ if self.hf_ds_config is None:
+ self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
+ if (
+ isinstance(self.hf_ds_config, dict)
+ or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
+ or isinstance(self.hf_ds_config, HfDeepSpeedConfig)
+ ):
+ if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):
+ self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
+ if "gradient_accumulation_steps" not in self.hf_ds_config.config:
+ self.hf_ds_config.config["gradient_accumulation_steps"] = 1
+ if "zero_optimization" not in self.hf_ds_config.config:
+ raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
+
+ self._deepspeed_config_checks()
+ plugin_to_config_mapping = {
+ "gradient_accumulation_steps": "gradient_accumulation_steps",
+ "gradient_clipping": "gradient_clipping",
+ "zero_stage": "zero_optimization.stage",
+ "offload_optimizer_device": "zero_optimization.offload_optimizer.device",
+ "offload_param_device": "zero_optimization.offload_param.device",
+ "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path",
+ "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path",
+ "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
+ }
+ kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
+ for key in kwargs.keys():
+ self.fill_match(key, **kwargs, must_match=False)
+ self.hf_ds_config.set_stage_and_offload()
+
+ # filling the missing values in the class attributes from the DeepSpeed config
+ # when using the DeepSpeed config file.
+ for key, value in plugin_to_config_mapping.items():
+ config_value = self.hf_ds_config.get_value(value)
+ if config_value is not None and config_value != "auto":
+ setattr(self, key, config_value)
+ else:
+ config = {
+ "train_batch_size": "auto",
+ "train_micro_batch_size_per_gpu": "auto",
+ "gradient_accumulation_steps": self.gradient_accumulation_steps,
+ "zero_optimization": {
+ "stage": self.zero_stage,
+ "offload_optimizer": {
+ "device": self.offload_optimizer_device,
+ "nvme_path": self.offload_optimizer_nvme_path
+ if self.offload_optimizer_device == "nvme"
+ else None,
+ },
+ "offload_param": {
+ "device": self.offload_param_device,
+ "nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None,
+ },
+ "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model,
+ },
+ }
+ if self.gradient_clipping:
+ config["gradient_clipping"] = self.gradient_clipping
+ self.hf_ds_config = HfDeepSpeedConfig(config)
+
+ self.deepspeed_config = self.hf_ds_config.config
+ self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
+ if self.zero3_init_flag is None:
+ self.zero3_init_flag = (
+ str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
+ )
+ if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
+ warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
+ self.zero3_init_flag = False
+
+ def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
+ mismatches = [] if mismatches is None else mismatches
+ config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
+ if config is None:
+ return
+
+ if config.get(ds_key) == "auto":
+ if ds_key_long in kwargs:
+ config[ds_key] = kwargs[ds_key_long]
+ return
+ else:
+ raise ValueError(
+ f"`{ds_key_long}` not found in kwargs. "
+ f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or "
+ "pass it in kwargs."
+ )
+
+ if not must_match:
+ return
+
+ ds_val = config.get(ds_key)
+ if ds_val is not None and ds_key_long in kwargs:
+ if ds_val != kwargs[ds_key_long]:
+ mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
+
+ def is_auto(self, ds_key_long):
+ val = self.hf_ds_config.get_value(ds_key_long)
+ if val is None:
+ return False
+ else:
+ return val == "auto"
+
+ def get_value(self, ds_key_long, default=None):
+ return self.hf_ds_config.get_value(ds_key_long, default)
+
+ def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
+ """Process the DeepSpeed config with the values from the kwargs."""
+ mismatches = [] if mismatches is None else mismatches
+ if config is None:
+ config = self.deepspeed_config
+ for key, value in config.items():
+ if isinstance(value, dict):
+ self.deepspeed_config_process(
+ prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs
+ )
+ else:
+ self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)
+ if len(mismatches) > 0 and prefix == "":
+ mismatches_msg = "\n".join(mismatches)
+ raise ValueError(
+ "Please correct the following DeepSpeed config values that mismatch kwargs "
+ f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
+ )
+
+ def set_mixed_precision(self, mixed_precision):
+ ds_config = self.deepspeed_config
+ kwargs = {
+ "fp16.enabled": mixed_precision == "fp16",
+ "bf16.enabled": mixed_precision == "bf16",
+ }
+ if mixed_precision == "fp16":
+ if "fp16" not in ds_config:
+ ds_config["fp16"] = {"enabled": True, "auto_cast": True}
+ elif mixed_precision == "bf16":
+ if "bf16" not in ds_config:
+ ds_config["bf16"] = {"enabled": True}
+
+ if mixed_precision != "no":
+ diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
+ if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
+ raise ValueError(
+ f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file."
+ )
+ for dtype in ["fp16", "bf16"]:
+ if dtype not in ds_config:
+ ds_config[dtype] = {"enabled": False}
+ self.fill_match("fp16.enabled", must_match=False, **kwargs)
+ self.fill_match("bf16.enabled", must_match=False, **kwargs)
+
+ def set_deepspeed_weakref(self):
+ from .imports import is_transformers_available
+
+ if self.zero3_init_flag:
+ if not is_transformers_available():
+ raise Exception(
+ "When `zero3_init_flag` is set, it requires Transformers to be installed. "
+ "Please run `pip install transformers`."
+ )
+ ds_config = copy.deepcopy(self.deepspeed_config)
+ if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto":
+ ds_config["gradient_accumulation_steps"] = 1
+ if (
+ "train_micro_batch_size_per_gpu" not in ds_config
+ or ds_config["train_micro_batch_size_per_gpu"] == "auto"
+ ):
+ ds_config["train_micro_batch_size_per_gpu"] = 1
+ if ds_config.get("train_batch_size", None) == "auto":
+ del ds_config["train_batch_size"]
+
+ if compare_versions("transformers", "<", "4.33"):
+ from transformers.deepspeed import HfDeepSpeedConfig
+ else:
+ from transformers.integrations import HfDeepSpeedConfig
+
+ self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
+
+ def is_zero3_init_enabled(self):
+ return self.zero3_init_flag
+
+ @contextmanager
+ def zero3_init_context_manager(self, enable=False):
+ old = self.zero3_init_flag
+ if old == enable:
+ yield
+ else:
+ self.zero3_init_flag = enable
+ self.dschf = None
+ self.set_deepspeed_weakref()
+ yield
+ self.zero3_init_flag = old
+ self.dschf = None
+ self.set_deepspeed_weakref()
+
+ def _deepspeed_config_checks(self):
+ env_variable_names_to_ignore = [
+ "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
+ "ACCELERATE_GRADIENT_CLIPPING",
+ "ACCELERATE_DEEPSPEED_ZERO_STAGE",
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE",
+ "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE",
+ "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH",
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH",
+ "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL",
+ "ACCELERATE_MIXED_PRECISION",
+ ]
+ env_variable_names_to_ignore = [
+ name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
+ ]
+
+ deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
+
+ if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
+ raise ValueError(
+ f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
+ "Please specify them appropriately in the DeepSpeed config file.\n"
+ "If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n"
+ "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
+ "It will only ask for the necessary config variables when using `deepspeed_config_file`."
+ )
+
+
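+# Illustrative sketch, not part of the upstream module: `deepspeed_config_process` above
+# recursively walks the nested DeepSpeed config and reports keys in dotted form (for
+# example "zero_optimization.stage"). The helper below, with the hypothetical name
+# `_flatten_ds_keys`, shows the same traversal in isolation.
+def _flatten_ds_keys(config, prefix=""):
+    # Yield (dotted_key, value) pairs for every leaf entry of a nested dict.
+    for key, value in config.items():
+        if isinstance(value, dict):
+            yield from _flatten_ds_keys(value, prefix=prefix + key + ".")
+        else:
+            yield prefix + key, value
+
+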
+@dataclass
+class FullyShardedDataParallelPlugin:
+ """
+ This plugin is used to enable fully sharded data parallelism.
+ """
+
+ sharding_strategy: "typing.Any" = field(
+ default=None,
+ metadata={
+ "help": "FSDP Sharding Strategy of type `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`"
+ },
+ )
+ backward_prefetch: "typing.Any" = field(
+ default=None,
+ metadata={
+ "help": "FSDP Backward Prefetch of type `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`"
+ },
+ )
+ mixed_precision_policy: "typing.Any" = field(
+ default=None,
+ metadata={
+ "help": "A config to enable mixed precision training with FullyShardedDataParallel. "
+ "The 3 flags that are set are `param_dtype`, `reduce_dtype`, `buffer_dtype`. "
+ "Each flag expects `torch.dtype` as the value. "
+ "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision`."
+ },
+ )
+ auto_wrap_policy: Optional[Callable] = field(
+ default=None,
+ metadata={"help": "A callable specifying a policy to recursively wrap layers with FSDP"},
+ )
+ cpu_offload: "typing.Any" = field(
+ default=None,
+ metadata={
+ "help": "Decides Whether to offload parameters and gradients to CPU. "
+ "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`."
+ },
+ )
+ ignored_modules: Optional[Iterable[torch.nn.Module]] = field(
+ default=None,
+ metadata={"help": "A list of modules to ignore for FSDP."},
+ )
+ state_dict_type: "typing.Any" = field(
+ default=None,
+ metadata={
+ "help": "FSDP State Dict Type of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictType`"
+ },
+ )
+ state_dict_config: "typing.Any" = field(
+ default=None,
+ metadata={
+ "help": "FSDP State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictConfig`"
+ },
+ )
+ optim_state_dict_config: "typing.Any" = field(
+ default=None,
+ metadata={
+ "help": "FSDP Optimizer State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.OptimStateDictConfig`"
+ },
+ )
+ limit_all_gathers: bool = field(
+ default=True,
+ metadata={
+ "help": "If False, then FSDP allows the CPU thread to schedule all-gathers "
+ "without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent "
+ "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. "
+ "Enabling this can help lower the number of CUDA malloc retries."
+ },
+ )
+ use_orig_params: bool = field(
+ default=True,
+ metadata={
+ "help": "If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. "
+ "Useful in cases such as parameter-efficient fine-tuning. "
+ "Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). "
+ "This also enables multiple optimizer param groups. This should be `True` when creating an optimizer object before preparing/wrapping the model with FSDP."
+ },
+ )
+ param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field(
+ default=None,
+ metadata={
+ "help": "A Callable[torch.nn.Module] -> None that specifies how modules "
+ "that are currently on the meta device should be initialized onto an actual device."
+ },
+ )
+ sync_module_states: bool = field(
+ default=True,
+ metadata={
+ "help": "If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0 "
+ "to ensure they are the same across all ranks after initialization"
+ },
+ )
+ forward_prefetch: bool = field(
+ default=False,
+ metadata={
+ "help": "If True, then FSDP explicitly prefetches the next upcoming "
+ "all-gather while executing in the forward pass. only use with Static graphs."
+ },
+ )
+ activation_checkpointing: bool = field(
+ default=False,
+ metadata={
+ "help": "If True, activation checkpointing is a technique to reduce memory usage by clearing activations of "
+ "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time "
+ "for reduced memory usage."
+ },
+ )
+
+ def __post_init__(self):
+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
+
+ prefix = "FSDP_"
+ if self.sharding_strategy is None:
+ sharding_strategy = os.environ.get(prefix + "SHARDING_STRATEGY", "FULL_SHARD")
+ sharding_strategy = (
+ FSDP_SHARDING_STRATEGY.index(sharding_strategy) + 1
+ if not sharding_strategy.isdigit()
+ else int(sharding_strategy)
+ )
+ self.sharding_strategy = ShardingStrategy(sharding_strategy)
+
+ if self.cpu_offload is None:
+ if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
+ self.cpu_offload = CPUOffload(offload_params=True)
+ else:
+ self.cpu_offload = CPUOffload(offload_params=False)
+
+ if self.backward_prefetch is None:
+ prefetch_policy = os.environ.get(prefix + "BACKWARD_PREFETCH", "NO_PREFETCH")
+ if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:
+ self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)
+
+ if self.state_dict_type is None:
+ state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
+ self.set_state_dict_type(state_dict_type_policy)
+ self.use_orig_params = str_to_bool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
+ self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
+ self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
+ self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
+
+ if self.sync_module_states:
+ if is_npu_available():
+ device = torch.npu.current_device()
+ elif is_cuda_available():
+ device = torch.cuda.current_device()
+ elif is_xpu_available():
+ device = torch.xpu.current_device()
+ else:
+ raise RuntimeError(
+ "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
+ )
+ self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
+
+ @staticmethod
+ def get_module_class_from_name(module, name):
+ """
+ Gets a class from a module by its name.
+
+ Args:
+ module (`torch.nn.Module`): The module to get the class from.
+ name (`str`): The name of the class.
+ """
+ modules_children = list(module.children())
+ if module.__class__.__name__ == name:
+ return module.__class__
+ elif len(modules_children) == 0:
+ return
+ else:
+ for child_module in modules_children:
+ module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)
+ if module_class is not None:
+ return module_class
+
+ def set_auto_wrap_policy(self, model):
+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
+
+ default_transformer_cls_names_to_wrap = (
+ ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
+ )
+ if self.auto_wrap_policy is None:
+ auto_wrap_policy = os.environ.get("FSDP_AUTO_WRAP_POLICY", "NO_WRAP")
+ if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:
+ transformer_cls_names_to_wrap = os.environ.get(
+ "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap
+ ).split(",")
+ transformer_cls_to_wrap = set()
+ for layer_class in transformer_cls_names_to_wrap:
+ transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class)
+ if transformer_cls is None:
+ raise Exception("Could not find the transformer layer class to wrap in the model.")
+ else:
+ transformer_cls_to_wrap.add(transformer_cls)
+
+ self.auto_wrap_policy = functools.partial(
+ transformer_auto_wrap_policy,
+ # Transformer layer class to wrap
+ transformer_layer_cls=transformer_cls_to_wrap,
+ )
+ elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]:
+ min_num_params = int(os.environ.get("FSDP_MIN_NUM_PARAMS", 0))
+ if min_num_params > 0:
+ self.auto_wrap_policy = functools.partial(
+ size_based_auto_wrap_policy, min_num_params=min_num_params
+ )
+
+ def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False):
+ if isinstance(mixed_precision, str):
+ if mixed_precision == "fp16":
+ dtype = torch.float16
+ elif mixed_precision == "bf16":
+ dtype = torch.bfloat16
+ elif mixed_precision == "fp32":
+ dtype = torch.float32
+ else:
+ raise ValueError(f"Unknown mixed precision value: {mixed_precision}")
+ else:
+ dtype = mixed_precision
+
+ buffer_dtype = torch.float32 if buffer_autocast else dtype
+ from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
+
+ if self.mixed_precision_policy is None or override:
+ self.mixed_precision_policy = MixedPrecision(
+ param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_dtype
+ )
+
+ def set_state_dict_type(self, state_dict_type_policy):
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
+ FullOptimStateDictConfig,
+ FullStateDictConfig,
+ StateDictType,
+ )
+
+ self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
+
+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:
+ if self.state_dict_config is None:
+ self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
+ if self.optim_state_dict_config is None:
+ self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
+
+
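+# Illustrative sketch, not part of the upstream module: `get_module_class_from_name`
+# above walks a module tree depth-first and returns the first class whose name matches.
+# The toy model below is hypothetical and only used for demonstration.
+if __name__ == "__main__":
+    import torch.nn as nn
+
+    _toy_model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
+    _linear_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(_toy_model, "Linear")
+    assert _linear_cls is nn.Linear
+
+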
+@dataclass
+class MegatronLMPlugin:
+ """
+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. It also enables selective
+ activation recomputation and optimized fused kernels.
+ """
+
+ tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."})
+ pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."})
+ num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."})
+ gradient_clipping: float = field(
+ default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"}
+ )
+ sequence_parallelism: bool = field(
+ default=None,
+ metadata={"help": "enable sequence parallelism"},
+ )
+ recompute_activations: bool = field(
+ default=None,
+ metadata={"help": "enable selective activation recomputation"},
+ )
+ use_distributed_optimizer: bool = field(
+ default=None,
+ metadata={"help": "enable distributed optimizer"},
+ )
+ pipeline_model_parallel_split_rank: int = field(
+ default=None, metadata={"help": "Rank where encoder and decoder should be split."}
+ )
+ num_layers_per_virtual_pipeline_stage: int = field(
+ default=None, metadata={"help": "Number of layers per virtual pipeline stage."}
+ )
+ is_train_batch_min: bool = field(
+ default=True,
+ metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"},
+ )
+ train_iters: int = field(
+ default=None,
+ metadata={
+ "help": "Total number of iterations to train over all training runs. "
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
+ },
+ )
+ train_samples: int = field(
+ default=None,
+ metadata={
+ "help": "Total number of samples to train over all training runs. "
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
+ },
+ )
+ weight_decay_incr_style: str = field(
+ default="constant",
+ metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '},
+ )
+ start_weight_decay: float = field(
+ default=None,
+ metadata={"help": "Initial weight decay coefficient for L2 regularization."},
+ )
+ end_weight_decay: float = field(
+ default=None,
+ metadata={"help": "End of run weight decay coefficient for L2 regularization."},
+ )
+ lr_decay_style: str = field(
+ default="linear",
+ metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."},
+ )
+ lr_decay_iters: int = field(
+ default=None,
+ metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."},
+ )
+ lr_decay_samples: int = field(
+ default=None,
+ metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."},
+ )
+ lr_warmup_iters: int = field(
+ default=None,
+ metadata={"help": "number of iterations to linearly warmup learning rate over."},
+ )
+ lr_warmup_samples: int = field(
+ default=None,
+ metadata={"help": "number of samples to linearly warmup learning rate over."},
+ )
+ lr_warmup_fraction: float = field(
+ default=None,
+ metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."},
+ )
+ min_lr: float = field(
+ default=0,
+ metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."},
+ )
+ consumed_samples: List[int] = field(
+ default=None,
+ metadata={
+ "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call."
+ },
+ )
+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."})
+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."})
+ lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."})
+ megatron_dataset_flag: bool = field(
+ default=False,
+ metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."},
+ )
+ seq_length: int = field(
+ default=None,
+ metadata={"help": "Maximum sequence length to process."},
+ )
+ encoder_seq_length: int = field(
+ default=None,
+ metadata={"help": "Maximum sequence length to process for the encoder."},
+ )
+ decoder_seq_length: int = field(
+ default=None,
+ metadata={"help": "Maximum sequence length to process for the decoder."},
+ )
+ tensorboard_dir: str = field(
+ default=None,
+ metadata={"help": "Path to save tensorboard logs."},
+ )
+ set_all_logging_options: bool = field(
+ default=False,
+ metadata={"help": "Whether to set all logging options."},
+ )
+ eval_iters: int = field(
+ default=100, metadata={"help": "Number of iterations to run for evaluation on the validation/test sets."}
+ )
+ eval_interval: int = field(
+ default=1000, metadata={"help": "Interval between running evaluation on validation set."}
+ )
+ return_logits: bool = field(
+ default=False,
+ metadata={"help": "Whether to return logits from the model."},
+ )
+
+ # custom train step args
+ custom_train_step_class: Optional[Any] = field(
+ default=None,
+ metadata={"help": "Custom train step class."},
+ )
+ custom_train_step_kwargs: Optional[Dict[str, Any]] = field(
+ default=None,
+ metadata={"help": "Custom train step kwargs."},
+ )
+
+ # custom model args
+ custom_model_provider_function: Optional[Callable] = field(
+ default=None,
+ metadata={"help": "Custom model provider function."},
+ )
+ custom_prepare_model_function: Optional[Callable] = field(
+ default=None,
+ metadata={"help": "Custom prepare model function."},
+ )
+
+ # remaining args such as enabling Alibi/ROPE positional embeddings,
+ # wandb logging, Multi-Query Attention, etc.
+ other_megatron_args: Optional[Dict[str, Any]] = field(
+ default=None,
+ metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
+ )
+
+ def __post_init__(self):
+ prefix = "MEGATRON_LM_"
+ if self.tp_degree is None:
+ self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1))
+ if self.pp_degree is None:
+ self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1))
+ if self.num_micro_batches is None:
+ self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1))
+ if self.gradient_clipping is None:
+ self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
+ if self.recompute_activations is None:
+ self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1
+ if self.use_distributed_optimizer is None:
+ self.use_distributed_optimizer = (
+ str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
+ )
+ if self.sequence_parallelism is None:
+ self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
+
+ if self.pp_degree > 1 or self.use_distributed_optimizer:
+ self.DDP_impl = "local"
+ else:
+ self.DDP_impl = "torch"
+
+ if self.consumed_samples is not None:
+ if len(self.consumed_samples) == 1:
+ self.consumed_samples.extend([0, 0])
+ elif len(self.consumed_samples) == 2:
+ self.consumed_samples.append(0)
+
+ self.megatron_lm_default_args = {
+ "tensor_model_parallel_size": self.tp_degree,
+ "pipeline_model_parallel_size": self.pp_degree,
+ "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank,
+ "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage,
+ "DDP_impl": self.DDP_impl,
+ "use_distributed_optimizer": self.use_distributed_optimizer,
+ "sequence_parallel": self.sequence_parallelism,
+ "clip_grad": self.gradient_clipping,
+ "num_micro_batches": self.num_micro_batches,
+ "consumed_samples": self.consumed_samples,
+ "no_wd_decay_cond": self.no_wd_decay_cond,
+ "scale_lr_cond": self.scale_lr_cond,
+ "lr_mult": self.lr_mult,
+ "megatron_dataset_flag": self.megatron_dataset_flag,
+ "eval_iters": self.eval_iters,
+ "eval_interval": self.eval_interval,
+ }
+ if self.recompute_activations:
+ self.megatron_lm_default_args["recompute_granularity"] = "selective"
+ if self.tensorboard_dir is not None:
+ self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir
+ if self.set_all_logging_options:
+ self.set_tensorboard_logging_options()
+ if self.other_megatron_args is not None:
+ self.megatron_lm_default_args.update(self.other_megatron_args)
+
+ def set_network_size_args(self, model, batch_data=None):
+ # Check if the model is either BERT, GPT or T5 else raise error
+ # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
+ if "megatron-bert" in model.config.model_type.lower():
+ model_type_name = "bert"
+ num_layers = model.config.num_hidden_layers
+ hidden_size = model.config.hidden_size
+ num_attention_heads = model.config.num_attention_heads
+ max_position_embeddings = model.config.max_position_embeddings
+ num_labels = model.config.num_labels
+ orig_vocab_size = model.config.vocab_size
+ if "maskedlm" in model.__class__.__name__.lower():
+ pretraining_flag = True
+ if self.seq_length is not None:
+ if self.encoder_seq_length is not None:
+ warnings.warn("Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.")
+ self.seq_length = self.encoder_seq_length
+ elif self.encoder_seq_length is not None:
+ self.seq_length = self.encoder_seq_length
+ elif batch_data is not None:
+ self.seq_length = batch_data["input_ids"].shape[1]
+ else:
+ self.seq_length = max_position_embeddings
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
+ elif "gpt2" in model.config.model_type.lower():
+ model_type_name = "gpt"
+ num_layers = model.config.n_layer
+ hidden_size = model.config.n_embd
+ num_attention_heads = model.config.n_head
+ max_position_embeddings = model.config.n_positions
+ orig_vocab_size = model.config.vocab_size
+ pretraining_flag = True
+ if self.seq_length is not None:
+ if self.decoder_seq_length is not None:
+ warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.")
+ self.seq_length = self.decoder_seq_length
+ elif self.decoder_seq_length is not None:
+ self.seq_length = self.decoder_seq_length
+ elif batch_data is not None:
+ self.seq_length = batch_data["input_ids"].shape[1]
+ else:
+ self.seq_length = max_position_embeddings
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
+ self.megatron_lm_default_args["return_logits"] = self.return_logits
+ self.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer"
+ elif "t5" in model.config.model_type.lower():
+ model_type_name = "t5"
+ num_layers = model.config.num_layers
+ hidden_size = model.config.d_model
+ num_attention_heads = model.config.num_heads
+ max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024
+ orig_vocab_size = model.config.vocab_size
+ pretraining_flag = True
+ if self.encoder_seq_length is None:
+ if batch_data is not None:
+ self.encoder_seq_length = batch_data["input_ids"].shape[1]
+ else:
+ self.encoder_seq_length = max_position_embeddings
+ if self.decoder_seq_length is None:
+ if batch_data is not None:
+ self.decoder_seq_length = batch_data["labels"].shape[1]
+ else:
+ self.decoder_seq_length = max_position_embeddings
+
+ self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length
+ self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length
+ else:
+ raise ValueError(
+ "🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. "
+ "Please check the model you are using is one of those."
+ )
+
+ self.megatron_lm_default_args["model_type_name"] = model_type_name
+ self.megatron_lm_default_args["num_layers"] = num_layers
+ self.megatron_lm_default_args["hidden_size"] = hidden_size
+ self.megatron_lm_default_args["num_attention_heads"] = num_attention_heads
+ self.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings
+ self.megatron_lm_default_args["pretraining_flag"] = pretraining_flag
+ self.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size
+ self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
+ if model_type_name == "bert":
+ self.megatron_lm_default_args["num_labels"] = num_labels
+
+ def set_mixed_precision(self, mixed_precision):
+ if mixed_precision == "fp16":
+ self.megatron_lm_default_args["fp16"] = True
+ elif mixed_precision == "bf16":
+ self.megatron_lm_default_args["bf16"] = True
+ self.DDP_impl = "local"
+ self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
+
+ def set_training_args(self, micro_batch_size, dp_degree):
+ self.data_parallel_size = dp_degree
+ self.micro_batch_size = micro_batch_size
+ self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches
+ self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
+ self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
+ self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
+
+ def set_optimizer_type(self, optimizer):
+ optimizer_name = optimizer.__class__.__name__.lower()
+ if "adam" in optimizer_name:
+ self.megatron_lm_default_args["optimizer"] = "adam"
+ self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0]
+ self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1]
+ self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"]
+ elif "sgd" in optimizer_name:
+ self.megatron_lm_default_args["optimizer"] = "sgd"
+ self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"]
+ else:
+ raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
+
+ self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
+ self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
+
+ def set_scheduler_args(self, scheduler):
+ if self.train_iters is None:
+ self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
+ if self.train_samples is not None:
+ self.train_samples = None
+ warnings.warn(
+ "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training."
+ )
+ if self.lr_warmup_iters is None:
+ self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"]
+ if self.lr_warmup_samples is not None:
+ warnings.warn(
+ "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training."
+ )
+ self.lr_warmup_samples = 0
+
+ self.megatron_lm_default_args["train_iters"] = self.train_iters
+ self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters
+ self.megatron_lm_default_args["train_samples"] = self.train_samples
+ self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples
+ self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters
+ self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples
+ self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction
+ self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style
+ self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style
+ self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
+ self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
+ self.megatron_lm_default_args["min_lr"] = self.min_lr
+
+ def set_tensorboard_logging_options(self):
+ from megatron.arguments import _add_logging_args
+
+ parser = argparse.ArgumentParser()
+ parser = _add_logging_args(parser)
+ logging_args = parser.parse_known_args()
+ self.dataset_args = vars(logging_args[0])
+ for key, value in self.dataset_args.items():
+ if key.startswith("log_"):
+ self.megatron_lm_default_args[key] = True
+ elif key.startswith("no_log_"):
+ self.megatron_lm_default_args[key.replace("no_", "")] = True
+
+
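+# Illustrative sketch, not part of the upstream module: `set_training_args` above derives
+# the global batch size as dp_degree * micro_batch_size * num_micro_batches. The helper
+# name `_megatron_global_batch_size` is hypothetical.
+def _megatron_global_batch_size(dp_degree, micro_batch_size, num_micro_batches):
+    # e.g. 8 data-parallel ranks * micro batch 4 * 16 micro batches -> 512 samples per step
+    return dp_degree * micro_batch_size * num_micro_batches
+
+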
+@dataclass
+class BnbQuantizationConfig:
+ """
+ A plugin to enable BitsAndBytes 4bit and 8bit quantization
+ """
+
+ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
+
+ llm_int8_threshold: float = field(
+ default=6.0, metadata={"help": "Value of the outlier threshold. Only relevant when load_in_8bit=True."}
+ )
+
+ load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
+
+ bnb_4bit_quant_type: str = field(
+ default="fp4",
+ metadata={
+ "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','np4'}."
+ },
+ )
+
+ bnb_4bit_use_double_quant: bool = field(
+ default=False,
+ metadata={
+ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
+ },
+ )
+
+ bnb_4bit_compute_dtype: str = field(
+ default="fp16",
+ metadata={
+ "help": "This sets the computational type which might be different than the input time. For example, inputs might be "
+ "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
+ },
+ )
+
+ torch_dtype: torch.dtype = field(
+ default=None,
+ metadata={
+ "help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value"
+ "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model "
+ },
+ )
+
+ skip_modules: List[str] = field(
+ default=None,
+ metadata={
+ "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
+ },
+ )
+
+ keep_in_fp32_modules: List[str] = field(
+ default=None,
+ metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
+ )
+
+ def __post_init__(self):
+ """
+ Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
+ """
+ if not isinstance(self.load_in_8bit, bool):
+ raise ValueError("load_in_8bit must be a boolean")
+
+ if not isinstance(self.load_in_4bit, bool):
+ raise ValueError("load_in_4bit must be a boolean")
+
+ if self.load_in_4bit and self.load_in_8bit:
+ raise ValueError("load_in_4bit and load_in_8 can't be both True")
+
+ if not self.load_in_4bit and not self.load_in_8bit:
+ raise ValueError("load_in_4bit and load_in_8 can't be both False")
+
+ if not isinstance(self.llm_int8_threshold, (int, float)):
+ raise ValueError("llm_int8_threshold must be a float or an int")
+
+ if not isinstance(self.bnb_4bit_quant_type, str):
+ raise ValueError("bnb_4bit_quant_type must be a string")
+ elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]:
+ raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}")
+
+ if not isinstance(self.bnb_4bit_use_double_quant, bool):
+ raise ValueError("bnb_4bit_use_double_quant must be a boolean")
+
+ if isinstance(self.bnb_4bit_compute_dtype, str):
+ if self.bnb_4bit_compute_dtype == "fp32":
+ self.bnb_4bit_compute_dtype = torch.float32
+ elif self.bnb_4bit_compute_dtype == "fp16":
+ self.bnb_4bit_compute_dtype = torch.float16
+ elif self.bnb_4bit_compute_dtype == "bf16":
+ self.bnb_4bit_compute_dtype = torch.bfloat16
+ else:
+ raise ValueError(
+ f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}"
+ )
+ elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
+ raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
+
+ if self.skip_modules is not None and not isinstance(self.skip_modules, list):
+ raise ValueError("skip_modules must be a list of strings")
+
+ if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list):
+ raise ValueError("keep_in_fp_32_modules must be a list of strings")
+
+ if self.load_in_4bit:
+ self.target_dtype = CustomDtype.INT4
+
+ if self.load_in_8bit:
+ self.target_dtype = torch.int8
+
+ if self.load_in_4bit and self.llm_int8_threshold != 6.0:
+ warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit")
+
+ if isinstance(self.torch_dtype, str):
+ if self.torch_dtype == "fp32":
+ self.torch_dtype = torch.float32
+ elif self.torch_dtype == "fp16":
+ self.torch_dtype = torch.float16
+ elif self.torch_dtype == "bf16":
+ self.torch_dtype = torch.bfloat16
+ else:
+ raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}")
+ if self.load_in_8bit and self.torch_dtype is None:
+ self.torch_dtype = torch.float16
+
+ if self.load_in_4bit and self.torch_dtype is None:
+ self.torch_dtype = self.bnb_4bit_compute_dtype
+
+ if not isinstance(self.torch_dtype, torch.dtype):
+ raise ValueError("torch_dtype must be a torch.dtype")
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/environment.py b/venv/lib/python3.10/site-packages/accelerate/utils/environment.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fdbd323632769146188cac1e91d08ab1e2ba617
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/environment.py
@@ -0,0 +1,274 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import math
+import os
+import platform
+import subprocess
+import sys
+from dataclasses import dataclass, field
+from functools import lru_cache
+from shutil import which
+from typing import List, Optional
+
+import torch
+from packaging.version import parse
+
+
+logger = logging.getLogger(__name__)
+
+
+def convert_dict_to_env_variables(current_env: dict):
+ """
+ Verifies that all keys and values in `current_env` do not contain illegal characters, and returns a list of
+ strings as the result.
+
+ Example:
+ ```python
+ >>> from accelerate.utils.environment import convert_dict_to_env_variables
+
+ >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"}
+ >>> valid_env_items = convert_dict_to_env_variables(env)
+ >>> print(valid_env_items)
+ ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"]
+ ```
+ """
+ forbidden_chars = [";", "\n", "<", ">", " "]
+ valid_env_items = []
+ for key, value in current_env.items():
+ if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1:
+ valid_env_items.append(f"{key}={value}\n")
+ else:
+ logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.")
+ return valid_env_items
+
+
+def str_to_bool(value) -> int:
+ """
+ Converts a string representation of truth to `True` (1) or `False` (0).
+
+ True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
+ """
+ value = value.lower()
+ if value in ("y", "yes", "t", "true", "on", "1"):
+ return 1
+ elif value in ("n", "no", "f", "false", "off", "0"):
+ return 0
+ else:
+ raise ValueError(f"invalid truth value {value}")
+
+
+def get_int_from_env(env_keys, default):
+ """Returns the first positive env value found in the `env_keys` list or the default."""
+ for e in env_keys:
+ val = int(os.environ.get(e, -1))
+ if val >= 0:
+ return val
+ return default
+
+
+def parse_flag_from_env(key, default=False):
+ """Returns truthy value for `key` from the env if available else the default."""
+ value = os.environ.get(key, str(default))
+ return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...
+
+
+def parse_choice_from_env(key, default="no"):
+ value = os.environ.get(key, str(default))
+ return value
+
+
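+# Illustrative usage of the helpers above, not part of the upstream module:
+#
+#   >>> str_to_bool("YES")
+#   1
+#   >>> parse_flag_from_env("SOME_UNSET_FLAG", default=False)
+#   False
+#   >>> get_int_from_env(["UNSET_KEY_A", "UNSET_KEY_B"], default=4)
+#   4
+
+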
+def are_libraries_initialized(*library_names: str) -> List[str]:
+ """
+ Checks if any of `library_names` are imported in the environment. Will return any names that are.
+ """
+ return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
+
+
+def _nvidia_smi():
+ """
+ Returns the right nvidia-smi command based on the system.
+ """
+ if platform.system() == "Windows":
+ # If the platform is Windows and nvidia-smi can't be found in PATH,
+ # try the system drive with the default installation path
+ command = which("nvidia-smi")
+ if command is None:
+ command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"]
+ else:
+ command = "nvidia-smi"
+ return command
+
+
+def get_gpu_info():
+ """
+ Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.
+
+ Largely based on the `gputil` library.
+ """
+ # Returns as list of `n` GPUs and their names
+ output = subprocess.check_output(
+ [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True
+ )
+ output = output.strip()
+ gpus = output.split(os.linesep)
+ # Get names from output
+ gpu_count = len(gpus)
+ gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
+ return gpu_names, gpu_count
+
+
+def get_driver_version():
+ """
+ Returns the driver version
+
+ In the case of multiple GPUs, will return the first.
+ """
+ output = subprocess.check_output(
+ [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True
+ )
+ output = output.strip()
+ return output.split(os.linesep)[0]
+
+
+def check_cuda_p2p_ib_support():
+ """
+ Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after
+ the 3090.
+
+ Notably uses `nvidia-smi` instead of torch to avoid initializing CUDA.
+ """
+ try:
+ device_names, device_count = get_gpu_info()
+ # As new consumer GPUs get released, add them to `unsupported_devices`
+ unsupported_devices = {"RTX 40"}
+ if device_count > 1:
+ if any(
+ unsupported_device in device_name
+ for device_name in device_names
+ for unsupported_device in unsupported_devices
+ ):
+ # Check if they have the right driver version
+ acceptable_driver_version = "550.40.07"
+ current_driver_version = get_driver_version()
+ if parse(current_driver_version) < parse(acceptable_driver_version):
+ return False
+ return True
+ except Exception:
+ pass
+ return True
+
+
+def check_fp8_capability():
+ """
+ Checks if all the current GPUs available support FP8.
+
+ Notably must initialize `torch.cuda` to check.
+ """
+ cuda_device_capacity = torch.cuda.get_device_capability()
+ return cuda_device_capacity >= (8, 9)
+
+
+@dataclass
+class CPUInformation:
+ """
+ Stores information about the CPU in a distributed environment. It contains the following attributes:
+ - rank: The rank of the current process.
+ - world_size: The total number of processes in the world.
+ - local_rank: The rank of the current process on the local node.
+ - local_world_size: The total number of processes on the local node.
+ """
+
+ rank: int = field(default=0, metadata={"help": "The rank of the current process."})
+ world_size: int = field(default=1, metadata={"help": "The total number of processes in the world."})
+ local_rank: int = field(default=0, metadata={"help": "The rank of the current process on the local node."})
+ local_world_size: int = field(default=1, metadata={"help": "The total number of processes on the local node."})
+
+
+def get_cpu_distributed_information() -> CPUInformation:
+ """
+ Returns various information about the environment in relation to CPU distributed training as a `CPUInformation`
+ dataclass.
+ """
+ information = {}
+ information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0)
+ information["world_size"] = get_int_from_env(
+ ["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1
+ )
+ information["local_rank"] = get_int_from_env(
+ ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
+ )
+ information["local_world_size"] = get_int_from_env(
+ ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
+ 1,
+ )
+ return CPUInformation(**information)
+
+
+def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
+ """
+ Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the
+ affinity to set; ideally you should use `utils.environment.set_numa_affinity` instead.
+
+ Args:
+ local_process_index (int):
+ The index of the current process on the current server.
+ verbose (bool, *optional*):
+ Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True.
+ """
+ if verbose is None:
+ verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False)
+ if torch.cuda.is_available():
+ from accelerate.utils import is_pynvml_available
+
+ if not is_pynvml_available():
+ raise ImportError(
+ "To set CPU affinity on CUDA GPUs the `pynvml` package must be available. (`pip install pynvml`)"
+ )
+ import pynvml as nvml
+
+ # The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py
+ nvml.nvmlInit()
+ num_elements = math.ceil(os.cpu_count() / 64)
+ handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index)
+ affinity_string = ""
+ for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements):
+ # assume nvml returns list of 64 bit ints
+ affinity_string = f"{j:064b}{affinity_string}"
+ affinity_list = [int(x) for x in affinity_string]
+ affinity_list.reverse() # so core 0 is the 0th element
+ affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0]
+ os.sched_setaffinity(0, affinity_to_set)
+ if verbose:
+ cpu_cores = os.sched_getaffinity(0)
+ logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}")
+
+
+@lru_cache
+def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
+ """
+ Assigns the current process to a specific NUMA node. This is most efficient when there are at least 2 CPUs per node.
+
+ This result is cached between calls. If you want to override it, please use
+ `accelerate.utils.environment.override_numa_affinity`.
+
+ Args:
+ local_process_index (int):
+ The index of the current process on the current server.
+ verbose (bool, *optional*):
+ Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will
+ default to True.
+ """
+ override_numa_affinity(local_process_index=local_process_index, verbose=verbose)
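+
+
+# Illustrative sketch, not part of the upstream module: outside of a distributed launcher
+# (no RANK/WORLD_SIZE style variables set), the helpers fall back to a single-process view.
+if __name__ == "__main__":
+    _info = get_cpu_distributed_information()
+    print(_info.rank, _info.world_size, _info.local_rank, _info.local_world_size)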
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/memory.py b/venv/lib/python3.10/site-packages/accelerate/utils/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..0141bf5f60430fa521de6cf196ac511a50790bb3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/memory.py
@@ -0,0 +1,158 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A collection of utilities for ensuring that training can always occur. Heavily influenced by the
+[toma](https://github.com/BlackHC/toma) library.
+"""
+
+import functools
+import gc
+import inspect
+
+import torch
+
+from .imports import is_mlu_available, is_mps_available, is_npu_available, is_xpu_available
+
+
+def release_memory(*objects):
+ """
+ Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`.
+ Returned objects should be reassigned to the same variables.
+
+ Args:
+ objects (`Iterable`):
+ An iterable of objects
+ Returns:
+ A list of `None` objects to replace `objects`
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate.utils import release_memory
+
+ >>> a = torch.ones(1000, 1000).cuda()
+ >>> b = torch.ones(1000, 1000).cuda()
+ >>> a, b = release_memory(a, b)
+ ```
+ """
+ if not isinstance(objects, list):
+ objects = list(objects)
+ for i in range(len(objects)):
+ objects[i] = None
+ gc.collect()
+ if is_xpu_available():
+ torch.xpu.empty_cache()
+ elif is_mlu_available():
+ torch.mlu.empty_cache()
+ elif is_npu_available():
+ torch.npu.empty_cache()
+ elif is_mps_available():
+ torch.mps.empty_cache()
+ else:
+ torch.cuda.empty_cache()
+ return objects
+
+
+def should_reduce_batch_size(exception: Exception) -> bool:
+ """
+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory
+
+ Args:
+ exception (`Exception`):
+ An exception
+ """
+ _statements = [
+ "CUDA out of memory.", # CUDA OOM
+ "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
+ "DefaultCPUAllocator: can't allocate memory", # CPU OOM
+ ]
+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:
+ return any(err in exception.args[0] for err in _statements)
+ return False
+
+
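+# Illustrative usage, not part of the upstream module:
+#
+#   >>> should_reduce_batch_size(RuntimeError("CUDA out of memory. Tried to allocate 2.0 GiB"))
+#   True
+#   >>> should_reduce_batch_size(ValueError("CUDA out of memory."))
+#   False
+
+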
+def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
+ """
+ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or
+ CUDNN, the batch size is cut in half and passed to `function`
+
+ `function` must take in a `batch_size` parameter as its first argument.
+
+ Args:
+ function (`callable`, *optional*):
+ A function to wrap
+ starting_batch_size (`int`, *optional*):
+ The batch size to try and fit into memory
+
+ Example:
+
+ ```python
+ >>> from accelerate.utils import find_executable_batch_size
+
+
+ >>> @find_executable_batch_size(starting_batch_size=128)
+ ... def train(batch_size, model, optimizer):
+ ... ...
+
+
+ >>> train(model, optimizer)
+ ```
+ """
+ if function is None:
+ return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
+
+ batch_size = starting_batch_size
+
+ def decorator(*args, **kwargs):
+ nonlocal batch_size
+ gc.collect()
+ if is_xpu_available():
+ torch.xpu.empty_cache()
+ elif is_mlu_available():
+ torch.mlu.empty_cache()
+ elif is_npu_available():
+ torch.npu.empty_cache()
+ else:
+ torch.cuda.empty_cache()
+ params = list(inspect.signature(function).parameters.keys())
+ # Guard against user error
+ if len(params) < (len(args) + 1):
+ arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
+ raise TypeError(
+ f"Batch size was passed into `{function.__name__}` as the first argument when called."
+ f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
+ )
+ while True:
+ if batch_size == 0:
+ raise RuntimeError("No executable batch size found, reached zero.")
+ try:
+ return function(batch_size, *args, **kwargs)
+ except Exception as e:
+ if should_reduce_batch_size(e):
+ gc.collect()
+ if is_xpu_available():
+ torch.xpu.empty_cache()
+ elif is_mlu_available():
+ torch.mlu.empty_cache()
+ elif is_npu_available():
+ torch.npu.empty_cache()
+ else:
+ torch.cuda.empty_cache()
+ batch_size //= 2
+ else:
+ raise
+
+ return decorator
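+
+
+# Illustrative sketch, not part of the upstream module: the decorated function receives the
+# current batch size as its first argument, and callers omit it. The toy trainer below is
+# hypothetical.
+if __name__ == "__main__":
+
+    @find_executable_batch_size(starting_batch_size=32)
+    def _toy_train(batch_size, num_epochs):
+        # A real training loop would go here and raise on OOM, triggering a halving retry.
+        return f"trained for {num_epochs} epoch(s) at batch size {batch_size}"
+
+    print(_toy_train(num_epochs=1))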
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/offload.py b/venv/lib/python3.10/site-packages/accelerate/utils/offload.py
new file mode 100644
index 0000000000000000000000000000000000000000..d064847ca21bde644b443de315b239414aa2fd51
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/offload.py
@@ -0,0 +1,213 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+from collections.abc import Mapping
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+import torch
+from safetensors import safe_open
+
+
+def offload_weight(weight, weight_name, offload_folder, index=None):
+ dtype = None
+ # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16.
+ if str(weight.dtype) == "torch.bfloat16":
+ # Need to reinterpret the underlying data as int16 since NumPy does not handle bfloat16s.
+ weight = weight.view(torch.int16)
+ dtype = "bfloat16"
+ array = weight.cpu().numpy()
+ tensor_file = os.path.join(offload_folder, f"{weight_name}.dat")
+ if index is not None:
+ if dtype is None:
+ dtype = str(array.dtype)
+ index[weight_name] = {"dtype": dtype, "shape": list(array.shape)}
+ if array.ndim == 0:
+ array = array[None]
+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape)
+ file_array[:] = array[:]
+ file_array.flush()
+ return index
+
+
+def load_offloaded_weight(weight_file, weight_info):
+ shape = tuple(weight_info["shape"])
+ if shape == ():
+ # NumPy memory-mapped arrays can't have 0 dims so it was saved as a 1d tensor
+ shape = (1,)
+
+ dtype = weight_info["dtype"]
+ if dtype == "bfloat16":
+ # NumPy does not support bfloat16 so this was saved as an int16
+ dtype = "int16"
+
+ weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r")
+
+ if len(weight_info["shape"]) == 0:
+ weight = weight[0]
+ weight = torch.tensor(weight)
+ if weight_info["dtype"] == "bfloat16":
+ weight = weight.view(torch.bfloat16)
+
+ return weight
+
+
+def save_offload_index(index, offload_folder):
+ if index is None or len(index) == 0:
+ # Nothing to save
+ return
+
+ offload_index_file = os.path.join(offload_folder, "index.json")
+ if os.path.isfile(offload_index_file):
+ with open(offload_index_file, encoding="utf-8") as f:
+ current_index = json.load(f)
+ else:
+ current_index = {}
+ current_index.update(index)
+
+ with open(offload_index_file, "w", encoding="utf-8") as f:
+ json.dump(current_index, f, indent=2)
+
+
+def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):
+ """
+ Offload a state dict in a given folder.
+
+ Args:
+ save_dir (`str` or `os.PathLike`):
+ The directory in which to offload the state dict.
+ state_dict (`Dict[str, torch.Tensor]`):
+ The dictionary of tensors to offload.
+ """
+ os.makedirs(save_dir, exist_ok=True)
+ index = {}
+ for name, parameter in state_dict.items():
+ index = offload_weight(parameter, name, save_dir, index=index)
+
+ # Update index
+ save_offload_index(index, save_dir)
+
+
+class PrefixedDataset(Mapping):
+ """
+ Will access keys in a given dataset by adding a prefix.
+
+ Args:
+ dataset (`Mapping`): Any map with string keys.
+ prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
+ """
+
+ def __init__(self, dataset: Mapping, prefix: str):
+ self.dataset = dataset
+ self.prefix = prefix
+
+ def __getitem__(self, key):
+ return self.dataset[f"{self.prefix}{key}"]
+
+ def __iter__(self):
+ return iter([key for key in self.dataset if key.startswith(self.prefix)])
+
+ def __len__(self):
+ return len(self.dataset)
+
+
+class OffloadedWeightsLoader(Mapping):
+ """
+ A collection that loads weights stored in a given state dict or memory-mapped on disk.
+
+ Args:
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
+ A dictionary mapping parameter names to tensors.
+ save_folder (`str` or `os.PathLike`, *optional*):
+ The directory in which the weights are stored (by `offload_state_dict` for instance).
+ index (`Dict`, *optional*):
+ A dictionary mapping weight names to their information (`dtype`/`shape` or safetensors filename). Will default
+ to the index saved in `save_folder`.
+ """
+
+ def __init__(
+ self,
+ state_dict: Dict[str, torch.Tensor] = None,
+ save_folder: Optional[Union[str, os.PathLike]] = None,
+ index: Mapping = None,
+ device=None,
+ ):
+ if state_dict is None and save_folder is None and index is None:
+ raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.")
+
+ self.state_dict = {} if state_dict is None else state_dict
+ self.save_folder = save_folder
+ if index is None and save_folder is not None:
+ with open(os.path.join(save_folder, "index.json")) as f:
+ index = json.load(f)
+ self.index = {} if index is None else index
+ self.all_keys = list(self.state_dict.keys())
+ self.all_keys.extend([key for key in self.index if key not in self.all_keys])
+ self.device = device
+
+ def __getitem__(self, key: str):
+ # State dict gets priority
+ if key in self.state_dict:
+ return self.state_dict[key]
+ weight_info = self.index[key]
+ if weight_info.get("safetensors_file") is not None:
+ device = "cpu" if self.device is None else self.device
+ tensor = None
+ try:
+ with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f:
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
+ except TypeError:
+ # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first
+ with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f:
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
+
+ if "dtype" in weight_info:
+ tensor = tensor.to(getattr(torch, weight_info["dtype"]))
+
+ if tensor.device != torch.device(device):
+ tensor = tensor.to(device)
+ return tensor
+
+ weight_file = os.path.join(self.save_folder, f"{key}.dat")
+ return load_offloaded_weight(weight_file, weight_info)
+
+ def __iter__(self):
+ return iter(self.all_keys)
+
+ def __len__(self):
+ return len(self.all_keys)
+
+
+def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):
+ """
+ Extract the sub state-dict corresponding to a list of given submodules.
+
+ Args:
+ state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from.
+ submodule_names (`List[str]`): The list of submodule names we want to extract.
+ """
+ result = {}
+ for module_name in submodule_names:
+ # We want to catch module_name parameters (module_name.xxx) or potentially module_name itself, but not any of the
+ # submodules whose names merely begin like module_name (transformers.h.1 and transformers.h.10 for instance)
+ result.update(
+ {
+ key: param
+ for key, param in state_dict.items()
+ if key == module_name or key.startswith(module_name + ".")
+ }
+ )
+ return result
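+
+
+# Illustrative round trip, not part of the upstream module: offload a tiny state dict to a
+# temporary folder, then read a tensor back through the lazy loader.
+if __name__ == "__main__":
+    import tempfile
+
+    with tempfile.TemporaryDirectory() as _tmp_dir:
+        offload_state_dict(_tmp_dir, {"layer.weight": torch.ones(2, 2)})
+        _loader = OffloadedWeightsLoader(save_folder=_tmp_dir)
+        assert torch.equal(_loader["layer.weight"], torch.ones(2, 2))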
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/operations.py b/venv/lib/python3.10/site-packages/accelerate/utils/operations.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2456a10710c9c0707245a00cf3630297d6d579f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/operations.py
@@ -0,0 +1,851 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+A set of basic tensor ops compatible with TPU, GPU, and multi-GPU setups
+"""
+
+import pickle
+import warnings
+from functools import update_wrapper, wraps
+from typing import Any, Mapping
+
+import torch
+
+from ..state import PartialState
+from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
+from .dataclasses import DistributedType, TensorInformation
+from .imports import (
+ is_npu_available,
+ is_torch_distributed_available,
+ is_torch_version,
+ is_torch_xla_available,
+ is_xpu_available,
+)
+
+
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+if is_torch_distributed_available():
+ from torch.distributed import ReduceOp
+
+
+def is_torch_tensor(tensor):
+ return isinstance(tensor, torch.Tensor)
+
+
+def is_torch_xpu_tensor(tensor):
+ # `isinstance` expects a single type or a tuple of types as its second argument.
+ return isinstance(
+ tensor,
+ (
+ torch.xpu.FloatTensor,
+ torch.xpu.ByteTensor,
+ torch.xpu.IntTensor,
+ torch.xpu.LongTensor,
+ torch.xpu.HalfTensor,
+ torch.xpu.DoubleTensor,
+ torch.xpu.BFloat16Tensor,
+ ),
+ )
+
+
+def is_tensor_information(tensor_info):
+ return isinstance(tensor_info, TensorInformation)
+
+
+def is_namedtuple(data):
+ """
+ Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
+ `namedtuple` perfectly.
+ """
+ return isinstance(data, tuple) and hasattr(data, "_asdict") and hasattr(data, "_fields")
+
+
+def honor_type(obj, generator):
+ """
+ Cast a generator to the same type as obj (list, tuple, or namedtuple)
+ """
+ # Some objects may not be able to instantiate from a generator directly
+ if is_namedtuple(obj):
+ return type(obj)(*list(generator))
+ else:
+ return type(obj)(generator)
+
+
+def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
+ """
+ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
+
+ Args:
+ func (`callable`):
+ The function to recursively apply.
+ data (nested list/tuple/dictionary of `main_type`):
+ The data on which to apply `func`
+ *args:
+ Positional arguments that will be passed to `func` when applied on the unpacked data.
+ test_type (`callable`, *optional*, defaults to `is_torch_tensor`):
+ A function taking an object as input and returning whether `func` should be applied to it.
+ error_on_other_type (`bool`, *optional*, defaults to `False`):
+ Whether to raise an error if, after unpacking `data`, we encounter an object that does not pass
+ `test_type`. If `False`, the function will leave such objects unchanged.
+ **kwargs (additional keyword arguments, *optional*):
+ Keyword arguments that will be passed to `func` when applied on the unpacked data.
+
+    Returns:
+        The same data structure as `data` with `func` applied to every object passing `test_type`.
+ """
+ if isinstance(data, (tuple, list)):
+ return honor_type(
+ data,
+ (
+ recursively_apply(
+ func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
+ )
+ for o in data
+ ),
+ )
+ elif isinstance(data, Mapping):
+ return type(data)(
+ {
+ k: recursively_apply(
+ func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
+ )
+ for k, v in data.items()
+ }
+ )
+ elif test_type(data):
+ return func(data, *args, **kwargs)
+ elif error_on_other_type:
+ raise TypeError(
+ f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of "
+ f"objects that are valid for `{test_type.__name__}` should be passed."
+ )
+ return data
+
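+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# A minimal, hypothetical usage of `recursively_apply`: it walks nested
+# lists/tuples/dicts, applies `func` to every leaf accepted by `test_type`
+# (tensors by default), and rebuilds the same container types around the results.
+def _example_recursively_apply():
+    batch = {"input_ids": torch.ones(2, 3), "labels": (torch.zeros(2), torch.zeros(2))}
+    # Halve every tensor while keeping the dict/tuple structure intact.
+    return recursively_apply(lambda t: t / 2, batch)
+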
+
+def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
+ """
+ Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to send to a given device.
+        device (`torch.device`):
+            The device to send the data to.
+        non_blocking (`bool`, *optional*, defaults to `False`):
+            Whether to perform the copy asynchronously when the underlying `.to()` call supports it.
+        skip_keys (`str` or `list[str]`, *optional*):
+            Dictionary keys whose values should be left on their current device.
+
+ Returns:
+ The same data structure as `tensor` with all tensors sent to the proper device.
+ """
+ if is_torch_tensor(tensor) or hasattr(tensor, "to"):
+ # `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)).
+ if device == "npu":
+ device = "npu:0"
+ if device == "xpu":
+ device = "xpu:0"
+ # TODO: torch_mlu LongTensor.to() has bugs, we will fix this later.
+ if is_torch_tensor(tensor) and tensor.device.type in ["mlu"] and tensor.dtype in [torch.int64]:
+ tensor = tensor.cpu()
+ try:
+ return tensor.to(device, non_blocking=non_blocking)
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
+ return tensor.to(device)
+ except AssertionError as error:
+ # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
+ # This call is inside the try-block since is_npu_available is not supported by torch.compile.
+ if is_npu_available():
+ if isinstance(device, int):
+ device = f"npu:{device}"
+ else:
+ raise error
+ except Exception as error:
+ if is_xpu_available():
+ if isinstance(device, int):
+ device = f"xpu:{device}"
+ else:
+ raise error
+ try:
+ return tensor.to(device, non_blocking=non_blocking)
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
+ return tensor.to(device)
+ elif isinstance(tensor, (tuple, list)):
+ return honor_type(
+ tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
+ )
+ elif isinstance(tensor, Mapping):
+ if isinstance(skip_keys, str):
+ skip_keys = [skip_keys]
+ elif skip_keys is None:
+ skip_keys = []
+ return type(tensor)(
+ {
+ k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys)
+ for k, t in tensor.items()
+ }
+ )
+ else:
+ return tensor
+
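+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `send_to_device`: every tensor in the nested batch is moved
+# to the target device; non-tensor values and any `skip_keys` entries are untouched.
+def _example_send_to_device():
+    batch = {"pixel_values": torch.randn(1, 3, 8, 8), "metadata": {"id": 42}}
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    return send_to_device(batch, device, skip_keys=["metadata"])
+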
+
+def get_data_structure(data):
+ """
+ Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
+
+ Args:
+ data (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to analyze.
+
+ Returns:
+ The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
+ """
+
+ def _get_data_structure(tensor):
+ return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
+
+ return recursively_apply(_get_data_structure, data)
+
+
+def get_shape(data):
+ """
+ Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
+
+ Args:
+ data (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to analyze.
+
+ Returns:
+ The same data structure as `data` with lists of tensor shapes instead of tensors.
+ """
+
+ def _get_shape(tensor):
+ return list(tensor.shape)
+
+ return recursively_apply(_get_shape, data)
+
+
+def initialize_tensors(data_structure):
+ """
+ Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
+
+ Returns:
+ The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
+ """
+
+ def _initialize_tensor(tensor_info):
+ return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
+
+ return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
+
+
+def find_batch_size(data):
+ """
+ Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
+
+ Args:
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
+
+ Returns:
+ `int`: The batch size.
+ """
+ if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
+ raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
+
+ if isinstance(data, (tuple, list)):
+ return find_batch_size(data[0])
+ elif isinstance(data, Mapping):
+ for k in data.keys():
+ return find_batch_size(data[k])
+ elif not isinstance(data, torch.Tensor):
+ raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
+ return data.shape[0]
+
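+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `find_batch_size`: it descends into the first element of the
+# structure until it reaches a tensor and returns that tensor's leading dimension.
+def _example_find_batch_size():
+    batch = {"input_ids": torch.zeros(8, 128), "attention_mask": torch.zeros(8, 128)}
+    return find_batch_size(batch)  # -> 8
+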
+
+def ignorant_find_batch_size(data):
+ """
+    Same as [`utils.operations.find_batch_size`] except that it returns `None` instead of raising when a
+    `ValueError` or `TypeError` is encountered.
+
+ Args:
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
+
+ Returns:
+ `int`: The batch size.
+ """
+ try:
+ return find_batch_size(data)
+ except (ValueError, TypeError):
+ pass
+ return None
+
+
+def listify(data):
+ """
+ Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
+
+ Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`): The data to convert to regular numbers.
+
+ Returns:
+ The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
+ """
+
+ def _convert_to_list(tensor):
+ tensor = tensor.detach().cpu()
+ if tensor.dtype == torch.bfloat16:
+ # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
+ # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
+        # Until NumPy adds bfloat16, we must convert to float32.
+ tensor = tensor.to(torch.float32)
+ return tensor.tolist()
+
+ return recursively_apply(_convert_to_list, data)
+
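+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `listify`: tensors are detached, moved to CPU and turned into
+# plain Python numbers/lists, e.g. before JSON-serializing logged metrics.
+def _example_listify():
+    metrics = {"loss": torch.tensor(0.25), "logits": torch.zeros(2, 2, dtype=torch.bfloat16)}
+    return listify(metrics)  # -> {"loss": 0.25, "logits": [[0.0, 0.0], [0.0, 0.0]]}
+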
+
+def _tpu_gather(tensor):
+ def _tpu_gather_one(tensor):
+ if tensor.ndim == 0:
+ tensor = tensor.clone()[None]
+
+ # Can only gather contiguous tensors
+ if not tensor.is_contiguous():
+ tensor = tensor.contiguous()
+ return xm.all_gather(tensor)
+
+ res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
+ xm.mark_step()
+ return res
+
+
+def _gpu_gather(tensor):
+ state = PartialState()
+ if is_torch_version(">=", "1.13"):
+ gather_op = torch.distributed.all_gather_into_tensor
+ else:
+ gather_op = torch.distributed._all_gather_base
+
+ def _gpu_gather_one(tensor):
+ if tensor.ndim == 0:
+ tensor = tensor.clone()[None]
+
+ # Can only gather contiguous tensors
+ if not tensor.is_contiguous():
+ tensor = tensor.contiguous()
+
+ if state.backend is not None and state.backend != "gloo":
+ # We use `empty` as `all_gather_into_tensor` slightly
+ # differs from `all_gather` for better efficiency,
+ # and we rely on the number of items in the tensor
+ # rather than its direct shape
+ output_tensors = torch.empty(
+ state.num_processes * tensor.numel(),
+ dtype=tensor.dtype,
+ device=state.device,
+ )
+ gather_op(output_tensors, tensor)
+ return output_tensors.view(-1, *tensor.size()[1:])
+ else:
+ # a backend of `None` is always CPU
+ # also gloo does not support `all_gather_into_tensor`,
+ # which will result in a larger memory overhead for the op
+ output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
+ torch.distributed.all_gather(output_tensors, tensor)
+ return torch.cat(output_tensors, dim=0)
+
+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
+
+
+class DistributedOperationException(Exception):
+ """
+ An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
+ tensors.
+ """
+
+ pass
+
+
+def verify_operation(function):
+ """
+    Verifies that `tensor` has the same shape across all processes. Only run if `PartialState().debug` is `True`.
+ """
+
+ @wraps(function)
+ def wrapper(*args, **kwargs):
+ if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
+ return function(*args, **kwargs)
+ operation = f"{function.__module__}.{function.__name__}"
+ if "tensor" in kwargs:
+ tensor = kwargs["tensor"]
+ else:
+ tensor = args[0]
+ if PartialState().device.type != find_device(tensor).type:
+ raise DistributedOperationException(
+ f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. "
+ f"Please move it to the {PartialState().device.type} before calling {operation}."
+ )
+ shapes = get_shape(tensor)
+ output = gather_object([shapes])
+ if output[0] is not None:
+ are_same = output.count(output[0]) == len(output)
+ if not are_same:
+ process_shape_str = "\n - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)])
+ raise DistributedOperationException(
+ f"Cannot apply desired operation due to shape mismatches. "
+ "All shapes across devices must be valid."
+ f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}"
+ )
+ return function(*args, **kwargs)
+
+ return wrapper
+
+
+def chained_operation(function):
+ """
+    Catches a `DistributedOperationException` raised inside the wrapped function (typically by `verify_operation`)
+    and re-raises a more helpful error chaining the original exception.
+ """
+
+ @wraps(function)
+ def wrapper(*args, **kwargs):
+ try:
+ return function(*args, **kwargs)
+ except DistributedOperationException as e:
+ operation = f"{function.__module__}.{function.__name__}"
+ raise DistributedOperationException(
+ f"Error found while calling `{operation}`. Please see the earlier error for more details."
+ ) from e
+
+ return wrapper
+
+
+@verify_operation
+def gather(tensor):
+ """
+    Recursively gathers the tensors in a nested list/tuple/dictionary of tensors from all devices.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to gather.
+
+ Returns:
+        The same data structure as `tensor` with all tensors gathered from every process along the first dimension.
+ """
+ if PartialState().distributed_type == DistributedType.XLA:
+ return _tpu_gather(tensor)
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+ return _gpu_gather(tensor)
+ else:
+ return tensor
+
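+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `gather`: under `accelerate launch`/`torchrun`, the per-process
+# tensor is collected from every rank and concatenated along dim 0; in a single-process
+# run the input is returned unchanged.
+def _example_gather():
+    predictions = torch.arange(4).reshape(2, 2)  # shape (2, 2) on each process
+    return gather(predictions)                   # shape (2 * num_processes, 2)
+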
+
+def _gpu_gather_object(object: Any):
+ output_objects = [None for _ in range(PartialState().num_processes)]
+ torch.distributed.all_gather_object(output_objects, object)
+ # all_gather_object returns a list of lists, so we need to flatten it
+ return [x for y in output_objects for x in y]
+
+
+def gather_object(object: Any):
+ """
+ Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
+
+ Args:
+ object (nested list/tuple/dictionary of picklable object):
+ The data to gather.
+
+ Returns:
+ The same data structure as `object` with all the objects sent to every device.
+ """
+ if PartialState().distributed_type == DistributedType.XLA:
+ raise NotImplementedError("gather objects in TPU is not supported")
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+ return _gpu_gather_object(object)
+ else:
+ return object
+
+
+def _gpu_broadcast(data, src=0):
+ def _gpu_broadcast_one(tensor, src=0):
+ torch.distributed.broadcast(tensor, src=src)
+ return tensor
+
+ return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
+
+
+def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
+ if isinstance(tensor, (list, tuple)):
+ return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
+ elif isinstance(tensor, Mapping):
+ return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
+ return xm.mesh_reduce(name, tensor, lambda x: x[src])
+
+
+TENSOR_TYPE_TO_INT = {
+ torch.float: 1,
+ torch.double: 2,
+ torch.half: 3,
+ torch.bfloat16: 4,
+ torch.uint8: 5,
+ torch.int8: 6,
+ torch.int16: 7,
+ torch.int32: 8,
+ torch.int64: 9,
+ torch.bool: 10,
+}
+
+TENSOR_INT_TO_DTYPE = {v: k for k, v in TENSOR_TYPE_TO_INT.items()}
+
+
+def gather_tensor_shape(tensor):
+ """
+    Grabs the shape of a `tensor` that is only available on one process and returns a tensor encoding that shape,
+    along with an integer code for its dtype.
+ """
+    # Allocate a fixed-size buffer (2**20 `int` entries) to hold the shape and the dtype code
+ max_tensor_dimension = 2**20
+ state = PartialState()
+ base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device)
+
+ # Since PyTorch can't just send a tensor to another GPU without
+ # knowing its size, we store the size of the tensor with data
+ # in an allocation
+ if tensor is not None:
+ shape = tensor.shape
+ tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype]
+ base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int)
+ # Perform a reduction to copy the size data onto all GPUs
+ base_tensor = reduce(base_tensor, reduction="sum")
+ base_tensor = base_tensor[base_tensor.nonzero()]
+    # The last non-zero entry encodes the dtype of the source tensor
+ dtype = int(base_tensor[-1:][0])
+ base_tensor = base_tensor[:-1]
+ return base_tensor, dtype
+
+
+def copy_tensor_to_devices(tensor=None) -> torch.Tensor:
+ """
+    Copies a tensor that only exists on a single device and broadcasts it to the other devices. Differs from
+    `broadcast` in that each worker does not need to know the tensor's shape beforehand (and `tensor` can be `None`
+    on all but one process).
+
+ Args:
+ tensor (`torch.tensor`):
+ The tensor that should be sent to all devices. Must only have it be defined on a single device, the rest
+ should be `None`.
+ """
+ state = PartialState()
+ shape, dtype = gather_tensor_shape(tensor)
+ if tensor is None:
+ tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device)
+ return reduce(tensor, reduction="sum")
+
+
+@verify_operation
+def broadcast(tensor, from_process: int = 0):
+ """
+ Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to broadcast.
+ from_process (`int`, *optional*, defaults to 0):
+ The process from which to send the data
+
+ Returns:
+ The same data structure as `tensor` with all tensors broadcasted to the proper device.
+ """
+ if PartialState().distributed_type == DistributedType.XLA:
+ return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast")
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+ return _gpu_broadcast(tensor, src=from_process)
+ else:
+ return tensor
+
+
+def broadcast_object_list(object_list, from_process: int = 0):
+ """
+    Broadcast a list of picklable objects from one process to the others.
+
+ Args:
+ object_list (list of picklable objects):
+ The list of objects to broadcast. This list will be modified inplace.
+ from_process (`int`, *optional*, defaults to 0):
+ The process from which to send the data.
+
+ Returns:
+ The same list containing the objects from process 0.
+ """
+ if PartialState().distributed_type == DistributedType.XLA:
+ for i, obj in enumerate(object_list):
+ object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process])
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
+ torch.distributed.broadcast_object_list(object_list, src=from_process)
+ return object_list
+
+
+def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
+ """
+ Recursively takes a slice in a nested list/tuple/dictionary of tensors.
+
+ Args:
+ data (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to slice.
+ tensor_slice (`slice`):
+ The slice to take.
+
+ Returns:
+        The same data structure as `data` with all the tensors sliced.
+ """
+
+ def _slice_tensor(tensor, tensor_slice):
+ return tensor[tensor_slice]
+
+ return recursively_apply(_slice_tensor, data, tensor_slice)
+
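+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `slice_tensors`: keep only the first two samples of every
+# tensor in the batch.
+def _example_slice_tensors():
+    batch = {"input_ids": torch.zeros(8, 16), "labels": torch.zeros(8)}
+    return slice_tensors(batch, slice(0, 2))  # every tensor now has leading dim 2
+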
+
+def concatenate(data, dim=0):
+ """
+ Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
+
+ Args:
+ data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
+ The data to concatenate.
+ dim (`int`, *optional*, defaults to 0):
+ The dimension on which to concatenate.
+
+ Returns:
+ The same data structure as `data` with all the tensors concatenated.
+ """
+ if isinstance(data[0], (tuple, list)):
+ return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))
+ elif isinstance(data[0], Mapping):
+ return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})
+ elif not isinstance(data[0], torch.Tensor):
+ raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
+ return torch.cat(data, dim=dim)
+
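+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `concatenate`: it takes a *list of structures* (e.g. one dict
+# per gathered batch) and concatenates the matching tensors inside them.
+def _example_concatenate():
+    batches = [{"logits": torch.zeros(2, 4)}, {"logits": torch.ones(3, 4)}]
+    return concatenate(batches, dim=0)  # {"logits": tensor of shape (5, 4)}
+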
+
+class CannotPadNestedTensorWarning(UserWarning):
+ pass
+
+
+@chained_operation
+def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
+ """
+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
+ can safely be gathered.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to gather.
+ dim (`int`, *optional*, defaults to 0):
+ The dimension on which to pad.
+ pad_index (`int`, *optional*, defaults to 0):
+ The value with which to pad.
+ pad_first (`bool`, *optional*, defaults to `False`):
+ Whether to pad at the beginning or the end.
+ """
+
+ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
+ if getattr(tensor, "is_nested", False):
+ warnings.warn(
+ "Cannot pad nested tensors without more information. Leaving unprocessed.",
+ CannotPadNestedTensorWarning,
+ )
+ return tensor
+ if dim >= len(tensor.shape):
+ return tensor
+
+ # Gather all sizes
+ size = torch.tensor(tensor.shape, device=tensor.device)[None]
+ sizes = gather(size).cpu()
+ # Then pad to the maximum size
+ max_size = max(s[dim] for s in sizes)
+ if max_size == tensor.shape[dim]:
+ return tensor
+
+ old_size = tensor.shape
+ new_size = list(old_size)
+ new_size[dim] = max_size
+ new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
+ if pad_first:
+ indices = tuple(
+ slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))
+ )
+ else:
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
+ new_tensor[indices] = tensor
+ return new_tensor
+
+ return recursively_apply(
+ _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
+ )
+
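+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `pad_across_processes`: each process may hold a different
+# number of samples, so tensors are padded to the longest size before calling `gather`.
+def _example_pad_then_gather():
+    logits = torch.randn(3, 10)  # this rank happens to hold 3 samples
+    padded = pad_across_processes(logits, dim=0, pad_index=0)
+    return gather(padded)
+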
+
+def pad_input_tensors(tensor, batch_size, num_processes, dim=0):
+ """
+    Takes a `tensor` of arbitrary size and pads its leading dimension so that it can be split evenly across `num_processes`.
+
+ New tensors are just the last input repeated.
+
+ E.g.:
+ Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4])
+
+ """
+
+ def _pad_input_tensors(tensor, batch_size, num_processes, dim=0):
+ remainder = batch_size // num_processes
+ last_inputs = batch_size - (remainder * num_processes)
+ if batch_size // num_processes == 0:
+ to_pad = num_processes - batch_size
+ else:
+ to_pad = num_processes - (batch_size // num_processes)
+ # In the rare case that `to_pad` is negative,
+ # we need to pad the last inputs - the found `to_pad`
+ if last_inputs > to_pad & to_pad < 1:
+ to_pad = last_inputs - to_pad
+ old_size = tensor.shape
+ new_size = list(old_size)
+ new_size[0] = batch_size + to_pad
+ new_tensor = tensor.new_zeros(tuple(new_size))
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
+ new_tensor[indices] = tensor
+ return new_tensor
+
+ return recursively_apply(
+ _pad_input_tensors,
+ tensor,
+ error_on_other_type=True,
+ batch_size=batch_size,
+ num_processes=num_processes,
+ dim=dim,
+ )
+
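+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `pad_input_tensors`, matching the docstring above: a batch of
+# 3 rows split across 4 processes is padded to 4 rows so it divides evenly.
+def _example_pad_input_tensors():
+    tensor = torch.arange(12).reshape(3, 4)
+    return pad_input_tensors(tensor, batch_size=3, num_processes=4)  # shape (4, 4)
+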
+
+@verify_operation
+def reduce(tensor, reduction="mean", scale=1.0):
+ """
+    Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using
+    a given operation.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to reduce.
+ reduction (`str`, *optional*, defaults to `"mean"`):
+            A reduction method. Can be one of "mean", "sum", or "none".
+        scale (`float`, *optional*):
+            A default scaling value to be applied after the reduce, only valid on XLA.
+
+ Returns:
+ The same data structure as `data` with all the tensors reduced.
+ """
+
+ def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
+ state = PartialState()
+ cloned_tensor = tensor.clone()
+ if state.distributed_type == DistributedType.NO:
+ return cloned_tensor
+ if state.distributed_type == DistributedType.XLA:
+ # Some processes may have different HLO graphs than other
+ # processes, for example in the breakpoint API
+ # accelerator.set_trigger(). Use mark_step to make HLOs
+ # the same on all processes.
+ xm.mark_step()
+ xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale)
+ xm.mark_step()
+ elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
+ torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
+ if reduction == "mean":
+ cloned_tensor /= state.num_processes
+ return cloned_tensor
+
+ return recursively_apply(
+ _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
+ )
+
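+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `reduce`: averaging a per-process loss across all ranks.
+# Outside a distributed run, a clone of the input is returned as-is.
+def _example_reduce_loss():
+    loss = torch.tensor(0.7)
+    return reduce(loss, reduction="mean")
+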
+
+def convert_to_fp32(tensor):
+ """
+    Recursively converts the elements of a nested list/tuple/dictionary of tensors from FP16/BF16 precision to FP32.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to convert from FP16/BF16 to FP32.
+
+ Returns:
+ The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
+ """
+
+ def _convert_to_fp32(tensor):
+ return tensor.float()
+
+ def _is_fp16_bf16_tensor(tensor):
+ return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in (
+ torch.float16,
+ torch.bfloat16,
+ )
+
+ return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
+
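+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `convert_to_fp32`: only float16/bfloat16 tensors are upcast;
+# fp32 tensors and non-tensor values pass through untouched.
+def _example_convert_to_fp32():
+    outputs = {"logits": torch.zeros(2, 2, dtype=torch.float16), "step": 3}
+    return convert_to_fp32(outputs)  # logits become torch.float32, "step" stays an int
+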
+
+class ConvertOutputsToFp32:
+ """
+    Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in
+    FP16/BF16 precision will be converted back to FP32.
+
+ Args:
+ model_forward (`Callable`):
+ The function which outputs we want to treat.
+
+ Returns:
+ The same function as `model_forward` but with converted outputs.
+ """
+
+ def __init__(self, model_forward):
+ self.model_forward = model_forward
+ update_wrapper(self, model_forward)
+
+ def __call__(self, *args, **kwargs):
+ return convert_to_fp32(self.model_forward(*args, **kwargs))
+
+ def __getstate__(self):
+ raise pickle.PicklingError(
+ "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
+ )
+
+
+def convert_outputs_to_fp32(model_forward):
+ model_forward = ConvertOutputsToFp32(model_forward)
+
+ def forward(*args, **kwargs):
+ return model_forward(*args, **kwargs)
+
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
+ forward.__wrapped__ = model_forward
+
+ return forward
+
+
+def find_device(data):
+ """
+ Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
+
+ Args:
+        data (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
+ """
+ if isinstance(data, Mapping):
+ for obj in data.values():
+ device = find_device(obj)
+ if device is not None:
+ return device
+ elif isinstance(data, (tuple, list)):
+ for obj in data:
+ device = find_device(obj)
+ if device is not None:
+ return device
+ elif isinstance(data, torch.Tensor):
+ return data.device
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/other.py b/venv/lib/python3.10/site-packages/accelerate/utils/other.py
new file mode 100644
index 0000000000000000000000000000000000000000..a313d08685be25707109c4973b346cdb0a4af90b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/other.py
@@ -0,0 +1,366 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import os
+import platform
+import re
+import socket
+from contextlib import contextmanager
+from functools import partial, reduce
+from types import MethodType
+from typing import OrderedDict
+
+import torch
+from packaging.version import Version
+from safetensors.torch import save_file as safe_save_file
+
+from ..commands.config.default import write_basic_config # noqa: F401
+from ..logging import get_logger
+from ..state import PartialState
+from .constants import FSDP_PYTORCH_VERSION
+from .dataclasses import DistributedType
+from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available
+from .modeling import id_tensor_storage
+from .transformer_engine import convert_model
+from .versions import is_torch_version
+
+
+logger = get_logger(__name__)
+
+
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+
+def is_compiled_module(module):
+ """
+ Check whether the module was compiled with torch.compile()
+ """
+ if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
+ return False
+ return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
+
+
+def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True, recursive: bool = False):
+ """
+ Extract a model from its distributed containers.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to extract.
+        keep_fp32_wrapper (`bool`, *optional*):
+            Whether to keep the mixed precision hooks (FP32 output conversion) attached to the model's forward.
+ recursive (`bool`, *optional*, defaults to `False`):
+ Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers
+ recursively, not just the top-level distributed containers.
+
+ Returns:
+ `torch.nn.Module`: The extracted model.
+ """
+ options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
+
+ is_compiled = is_compiled_module(model)
+ if is_compiled:
+ compiled_model = model
+ model = model._orig_mod
+
+ if is_deepspeed_available():
+ from deepspeed import DeepSpeedEngine
+
+ options += (DeepSpeedEngine,)
+
+ if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
+
+ options += (FSDP,)
+
+ while isinstance(model, options):
+ model = model.module
+
+ if recursive:
+ # This is needed in cases such as using FSDPv2 on XLA
+ def _recursive_unwrap(module):
+ # Wrapped modules are standardly wrapped as `module`, similar to the cases earlier
+ # with DDP, DataParallel, DeepSpeed, and FSDP
+ if hasattr(module, "module"):
+ unwrapped_module = _recursive_unwrap(module.module)
+ else:
+ unwrapped_module = module
+ # Next unwrap child sublayers recursively
+ for name, child in unwrapped_module.named_children():
+ setattr(unwrapped_module, name, _recursive_unwrap(child))
+ return unwrapped_module
+
+ # Start with top-level
+ model = _recursive_unwrap(model)
+
+ if not keep_fp32_wrapper:
+ forward = model.forward
+ original_forward = model.__dict__.pop("_original_forward", None)
+ if original_forward is not None:
+ while hasattr(forward, "__wrapped__"):
+ forward = forward.__wrapped__
+ if forward == original_forward:
+ break
+ model.forward = MethodType(forward, model)
+ if getattr(model, "_converted_to_transformer_engine", False):
+ convert_model(model, to_transformer_engine=False)
+
+ if is_compiled:
+ compiled_model._orig_mod = model
+ model = compiled_model
+
+ return model
+
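+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `extract_model_from_parallel`: unwrap a DataParallel (or
+# DDP/FSDP/DeepSpeed) container to reach the underlying module, e.g. before saving.
+def _example_extract_model():
+    wrapped = torch.nn.DataParallel(torch.nn.Linear(4, 2))
+    unwrapped = extract_model_from_parallel(wrapped)
+    return isinstance(unwrapped, torch.nn.Linear)  # True
+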
+
+def wait_for_everyone():
+ """
+    Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
+
+    Make sure all processes will reach this instruction; otherwise, one of your processes will hang forever.
+    """
+ PartialState().wait_for_everyone()
+
+
+def clean_state_dict_for_safetensors(state_dict: dict):
+ """
+ Cleans the state dictionary from a model and removes tensor aliasing if present.
+
+ Args:
+ state_dict (`dict`):
+ The state dictionary from a model
+ """
+ ptrs = collections.defaultdict(list)
+ # When bnb serialization is used, weights in state dict can be strings
+ for name, tensor in state_dict.items():
+ if not isinstance(tensor, str):
+ ptrs[id_tensor_storage(tensor)].append(name)
+
+ # These are all pointers of tensors with shared memory
+ shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
+ warn_names = set()
+ for names in shared_ptrs.values():
+ # When not all duplicates have been cleaned, we still remove those keys but put a clear warning.
+ # If the link between tensors was done at runtime then `from_pretrained` will not get
+ # the key back leading to random tensor. A proper warning will be shown
+ # during reload (if applicable), but since the file is not necessarily compatible with
+ # the config, better show a proper warning.
+ found_names = [name for name in names if name in state_dict]
+ warn_names.update(found_names[1:])
+ for name in found_names[1:]:
+ del state_dict[name]
+ if len(warn_names) > 0:
+ logger.warning(
+ f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
+ )
+ state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()}
+ return state_dict
+
+
+def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
+ """
+ Save the data to disk. Use in place of `torch.save()`.
+
+ Args:
+ obj:
+ The data to save
+ f:
+ The file (or file-like object) to use to save the data
+        save_on_each_node (`bool`, *optional*, defaults to `False`):
+            Whether to save on every node's local main process, instead of only on the global main process.
+ safe_serialization (`bool`, *optional*, defaults to `False`):
+ Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ """
+ # When TorchXLA is enabled, it's necessary to transfer all data to the CPU before saving.
+ # Another issue arises with `id_tensor_storage`, which treats all XLA tensors as identical.
+ # If tensors remain on XLA, calling `clean_state_dict_for_safetensors` will result in only
+ # one XLA tensor remaining.
+ if PartialState().distributed_type == DistributedType.XLA:
+ obj = xm._maybe_convert_to_cpu(obj)
+ # Check if it's a model and remove duplicates
+ if safe_serialization:
+ save_func = partial(safe_save_file, metadata={"format": "pt"})
+ if isinstance(obj, OrderedDict):
+ obj = clean_state_dict_for_safetensors(obj)
+ else:
+ save_func = torch.save
+
+ if PartialState().is_main_process and not save_on_each_node:
+ save_func(obj, f)
+ elif PartialState().is_local_main_process and save_on_each_node:
+ save_func(obj, f)
+
+
+@contextmanager
+def clear_environment():
+ """
+ A context manager that will temporarily clear environment variables.
+
+    When this context exits, the previous environment variables will be restored.
+
+ Example:
+
+ ```python
+ >>> import os
+ >>> from accelerate.utils import clear_environment
+
+ >>> os.environ["FOO"] = "bar"
+ >>> with clear_environment():
+ ... print(os.environ)
+ ... os.environ["FOO"] = "new_bar"
+ ... print(os.environ["FOO"])
+ {}
+ new_bar
+
+ >>> print(os.environ["FOO"])
+ bar
+ ```
+ """
+ _old_os_environ = os.environ.copy()
+ os.environ.clear()
+
+ try:
+ yield
+ finally:
+ os.environ.clear() # clear any added keys,
+ os.environ.update(_old_os_environ) # then restore previous environment
+
+
+@contextmanager
+def patch_environment(**kwargs):
+ """
+ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
+
+ Will convert the values in `kwargs` to strings and upper-case all the keys.
+
+ Example:
+
+ ```python
+ >>> import os
+ >>> from accelerate.utils import patch_environment
+
+ >>> with patch_environment(FOO="bar"):
+ ... print(os.environ["FOO"]) # prints "bar"
+ >>> print(os.environ["FOO"]) # raises KeyError
+ ```
+ """
+ existing_vars = {}
+ for key, value in kwargs.items():
+ key = key.upper()
+ if key in os.environ:
+ existing_vars[key] = os.environ[key]
+ os.environ[key] = str(value)
+
+ try:
+ yield
+ finally:
+ for key in kwargs:
+ key = key.upper()
+ if key in existing_vars:
+ # restore previous value
+ os.environ[key] = existing_vars[key]
+ else:
+ os.environ.pop(key, None)
+
+
+def get_pretty_name(obj):
+ """
+ Gets a pretty name from `obj`.
+ """
+ if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
+ obj = getattr(obj, "__class__", obj)
+ if hasattr(obj, "__qualname__"):
+ return obj.__qualname__
+ if hasattr(obj, "__name__"):
+ return obj.__name__
+ return str(obj)
+
+
+def merge_dicts(source, destination):
+ """
+ Recursively merges two dictionaries.
+
+ Args:
+ source (`dict`): The dictionary to merge into `destination`.
+ destination (`dict`): The dictionary to merge `source` into.
+ """
+ for key, value in source.items():
+ if isinstance(value, dict):
+ node = destination.setdefault(key, {})
+ merge_dicts(value, node)
+ else:
+ destination[key] = value
+
+ return destination
+
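+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `merge_dicts`: nested keys from `source` are merged into
+# `destination` in place (and the merged dict is also returned).
+def _example_merge_dicts():
+    defaults = {"optimizer": {"lr": 1e-3, "betas": (0.9, 0.999)}}
+    overrides = {"optimizer": {"lr": 5e-4}, "seed": 42}
+    return merge_dicts(overrides, defaults)
+    # -> {"optimizer": {"lr": 5e-4, "betas": (0.9, 0.999)}, "seed": 42}
+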
+
+def is_port_in_use(port: int = None) -> bool:
+ """
+ Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been
+ run and need to see if the port is already in use.
+ """
+ if port is None:
+ port = 29500
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ return s.connect_ex(("localhost", port)) == 0
+
+
+def convert_bytes(size):
+ "Converts `size` from bytes to the largest possible unit"
+ for x in ["bytes", "KB", "MB", "GB", "TB"]:
+ if size < 1024.0:
+ return f"{round(size, 2)} {x}"
+ size /= 1024.0
+
+ return f"{round(size, 2)} PB"
+
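+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `convert_bytes`: the largest unit keeping the value below 1024
+# is chosen.
+def _example_convert_bytes():
+    return convert_bytes(3 * 1024**3)  # -> "3.0 GB"
+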
+
+def check_os_kernel():
+ """Warns if the kernel version is below the recommended minimum on Linux."""
+ # see issue #1929
+ info = platform.uname()
+ system = info.system
+ if system != "Linux":
+ return
+
+ _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
+ min_version = "5.5.0"
+ if Version(version) < Version(min_version):
+ msg = (
+ f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can "
+ "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher."
+ )
+ logger.warning(msg, main_process_only=True)
+
+
+def recursive_getattr(obj, attr: str):
+ """
+ Recursive `getattr`.
+
+ Args:
+ obj:
+ A class instance holding the attribute.
+ attr (`str`):
+ The attribute that is to be retrieved, e.g. 'attribute1.attribute2'.
+ """
+
+ def _getattr(obj, attr):
+ return getattr(obj, attr)
+
+ return reduce(_getattr, [obj] + attr.split("."))
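+
+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `recursive_getattr`: dotted attribute access on nested objects.
+def _example_recursive_getattr():
+    linear = torch.nn.Linear(4, 2)
+    return recursive_getattr(linear, "weight.shape")  # -> torch.Size([2, 4])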
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/random.py b/venv/lib/python3.10/site-packages/accelerate/utils/random.py
new file mode 100644
index 0000000000000000000000000000000000000000..f21312289a77bce9143c985292a14185f35f5938
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/random.py
@@ -0,0 +1,122 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+from typing import List, Optional, Union
+
+import numpy as np
+import torch
+
+from ..state import AcceleratorState
+from .constants import CUDA_DISTRIBUTED_TYPES
+from .dataclasses import DistributedType, RNGType
+from .imports import is_mlu_available, is_npu_available, is_torch_xla_available, is_xpu_available
+
+
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+
+def set_seed(seed: int, device_specific: bool = False, deterministic: bool = False):
+ """
+ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
+
+ Args:
+ seed (`int`):
+ The seed to set.
+ device_specific (`bool`, *optional*, defaults to `False`):
+            Whether to offset the seed slightly on each device using the process index.
+ deterministic (`bool`, *optional*, defaults to `False`):
+ Whether to use deterministic algorithms where available. Can slow down training.
+ """
+ if device_specific:
+ seed += AcceleratorState().process_index
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ if is_xpu_available():
+ torch.xpu.manual_seed_all(seed)
+ elif is_npu_available():
+ torch.npu.manual_seed_all(seed)
+ elif is_mlu_available():
+ torch.mlu.manual_seed_all(seed)
+ else:
+ torch.cuda.manual_seed_all(seed)
+ # ^^ safe to call this function even if cuda is not available
+ if is_torch_xla_available():
+ xm.set_rng_state(seed)
+
+ if deterministic:
+ torch.use_deterministic_algorithms(True)
+
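+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `set_seed`: seed every RNG the library touches at the start of
+# a training script; `device_specific=True` would additionally offset the seed per rank.
+def _example_set_seed():
+    set_seed(42)
+    return torch.rand(1)  # deterministic across re-runs with the same seed
+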
+
+def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
+ # Get the proper rng state
+ if rng_type == RNGType.TORCH:
+ rng_state = torch.get_rng_state()
+ elif rng_type == RNGType.CUDA:
+ rng_state = torch.cuda.get_rng_state()
+ elif rng_type == RNGType.XLA:
+ assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable."
+ rng_state = torch.tensor(xm.get_rng_state())
+ elif rng_type == RNGType.NPU:
+ assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs."
+ rng_state = torch.npu.get_rng_state()
+ elif rng_type == RNGType.MLU:
+ assert is_mlu_available(), "Can't synchronize MLU seeds on an environment without MLUs."
+ rng_state = torch.mlu.get_rng_state()
+ elif rng_type == RNGType.XPU:
+ assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs."
+ rng_state = torch.xpu.get_rng_state()
+ elif rng_type == RNGType.GENERATOR:
+ assert generator is not None, "Need a generator to synchronize its seed."
+ rng_state = generator.get_state()
+
+ # Broadcast the rng state from device 0 to other devices
+ state = AcceleratorState()
+ if state.distributed_type == DistributedType.XLA:
+ rng_state = rng_state.to(xm.xla_device())
+ xm.collective_broadcast([rng_state])
+ xm.mark_step()
+ rng_state = rng_state.cpu()
+ elif (
+ state.distributed_type in CUDA_DISTRIBUTED_TYPES
+ or state.distributed_type == DistributedType.MULTI_MLU
+ or state.distributed_type == DistributedType.MULTI_NPU
+ or state.distributed_type == DistributedType.MULTI_XPU
+ ):
+ rng_state = rng_state.to(state.device)
+ torch.distributed.broadcast(rng_state, 0)
+ rng_state = rng_state.cpu()
+ elif state.distributed_type == DistributedType.MULTI_CPU:
+ torch.distributed.broadcast(rng_state, 0)
+
+ # Set the broadcast rng state
+ if rng_type == RNGType.TORCH:
+ torch.set_rng_state(rng_state)
+ elif rng_type == RNGType.CUDA:
+ torch.cuda.set_rng_state(rng_state)
+ elif rng_type == RNGType.NPU:
+ torch.npu.set_rng_state(rng_state)
+ elif rng_type == RNGType.XPU:
+ torch.xpu.set_rng_state(rng_state)
+ elif rng_type == RNGType.XLA:
+ xm.set_rng_state(rng_state.item())
+ elif rng_type == RNGType.GENERATOR:
+ generator.set_state(rng_state)
+
+
+def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
+ for rng_type in rng_types:
+ synchronize_rng_state(RNGType(rng_type), generator=generator)
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py b/venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py
new file mode 100644
index 0000000000000000000000000000000000000000..140133926c2f88d39c70f5a9f46a08f88bed36da
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/torch_xla.py
@@ -0,0 +1,51 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib.metadata
+import subprocess
+import sys
+
+
+def install_xla(upgrade: bool = False):
+ """
+ Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory.
+
+ Args:
+ upgrade (`bool`, *optional*, defaults to `False`):
+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.
+
+ Example:
+
+ ```python
+ >>> from accelerate.utils import install_xla
+
+ >>> install_xla(upgrade=True)
+ ```
+ """
+ in_colab = False
+ if "IPython" in sys.modules:
+ in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
+
+ if in_colab:
+ if upgrade:
+ torch_install_cmd = ["pip", "install", "-U", "torch"]
+ subprocess.run(torch_install_cmd, check=True)
+ # get the current version of torch
+ torch_version = importlib.metadata.version("torch")
+ torch_version_trunc = torch_version[: torch_version.rindex(".")]
+ xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl"
+ xla_install_cmd = ["pip", "install", xla_wheel]
+ subprocess.run(xla_install_cmd, check=True)
+ else:
+ raise RuntimeError("`install_xla` utility works only on google colab.")
diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/versions.py b/venv/lib/python3.10/site-packages/accelerate/utils/versions.py
new file mode 100644
index 0000000000000000000000000000000000000000..985c918f0e057bacc70c372f6906071bb73db577
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/accelerate/utils/versions.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib.metadata
+from typing import Union
+
+from packaging.version import Version, parse
+
+from .constants import STR_OPERATION_TO_FUNC
+
+
+torch_version = parse(importlib.metadata.version("torch"))
+
+
+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
+ """
+ Compares a library version to some requirement using a given operation.
+
+ Args:
+ library_or_version (`str` or `packaging.version.Version`):
+ A library name or a version to check.
+ operation (`str`):
+ A string representation of an operator, such as `">"` or `"<="`.
+ requirement_version (`str`):
+ The version to compare the library version against
+ """
+ if operation not in STR_OPERATION_TO_FUNC.keys():
+ raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
+ operation = STR_OPERATION_TO_FUNC[operation]
+ if isinstance(library_or_version, str):
+ library_or_version = parse(importlib.metadata.version(library_or_version))
+ return operation(library_or_version, parse(requirement_version))
+
+
+def is_torch_version(operation: str, version: str):
+ """
+ Compares the current PyTorch version to a given reference with an operation.
+
+ Args:
+ operation (`str`):
+ A string representation of an operator, such as `">"` or `"<="`
+ version (`str`):
+ A string version of PyTorch
+ """
+ return compare_versions(torch_version, operation, version)
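+
+
+# --- Editor's note: illustrative sketch, not part of the upstream module ---------
+# Hypothetical usage of `is_torch_version`: gate a code path on the installed torch version.
+def _example_version_gate():
+    if is_torch_version(">=", "2.0.0"):
+        return "torch.compile is available"
+    return "running on an older torch"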
diff --git a/venv/lib/python3.10/site-packages/more_itertools/__init__.py b/venv/lib/python3.10/site-packages/more_itertools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aff94a9abd02da42c8c012ea973ac8e812a52284
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/more_itertools/__init__.py
@@ -0,0 +1,6 @@
+"""More routines for operating on iterables, beyond itertools"""
+
+from .more import * # noqa
+from .recipes import * # noqa
+
+__version__ = '10.2.0'
diff --git a/venv/lib/python3.10/site-packages/more_itertools/__init__.pyi b/venv/lib/python3.10/site-packages/more_itertools/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..96f6e36c7f4ac9ea0aebdcd9e11b8d1ff092d2ef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/more_itertools/__init__.pyi
@@ -0,0 +1,2 @@
+from .more import *
+from .recipes import *
diff --git a/venv/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07615eeeef269a6c8ee5891680cfbce59f3d72d7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc b/venv/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31592aeb9776c7df43cfd7032a43125ae7fc5ee9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc b/venv/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1f972199c3999b834313747fbe7cb0e39f9f364
Binary files /dev/null and b/venv/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/more_itertools/more.py b/venv/lib/python3.10/site-packages/more_itertools/more.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd711a47634861f8106eda506bd6d2e521bcbb8b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/more_itertools/more.py
@@ -0,0 +1,4656 @@
+import warnings
+
+from collections import Counter, defaultdict, deque, abc
+from collections.abc import Sequence
+from functools import cached_property, partial, reduce, wraps
+from heapq import heapify, heapreplace, heappop
+from itertools import (
+ chain,
+ compress,
+ count,
+ cycle,
+ dropwhile,
+ groupby,
+ islice,
+ repeat,
+ starmap,
+ takewhile,
+ tee,
+ zip_longest,
+ product,
+)
+from math import exp, factorial, floor, log, perm, comb
+from queue import Empty, Queue
+from random import random, randrange, uniform
+from operator import itemgetter, mul, sub, gt, lt, ge, le
+from sys import hexversion, maxsize
+from time import monotonic
+
+from .recipes import (
+ _marker,
+ _zip_equal,
+ UnequalIterablesError,
+ consume,
+ flatten,
+ pairwise,
+ powerset,
+ take,
+ unique_everseen,
+ all_equal,
+ batched,
+)
+
+__all__ = [
+ 'AbortThread',
+ 'SequenceView',
+ 'UnequalIterablesError',
+ 'adjacent',
+ 'all_unique',
+ 'always_iterable',
+ 'always_reversible',
+ 'bucket',
+ 'callback_iter',
+ 'chunked',
+ 'chunked_even',
+ 'circular_shifts',
+ 'collapse',
+ 'combination_index',
+ 'combination_with_replacement_index',
+ 'consecutive_groups',
+ 'constrained_batches',
+ 'consumer',
+ 'count_cycle',
+ 'countable',
+ 'difference',
+ 'distinct_combinations',
+ 'distinct_permutations',
+ 'distribute',
+ 'divide',
+ 'duplicates_everseen',
+ 'duplicates_justseen',
+ 'classify_unique',
+ 'exactly_n',
+ 'filter_except',
+ 'filter_map',
+ 'first',
+ 'gray_product',
+ 'groupby_transform',
+ 'ichunked',
+ 'iequals',
+ 'ilen',
+ 'interleave',
+ 'interleave_evenly',
+ 'interleave_longest',
+ 'intersperse',
+ 'is_sorted',
+ 'islice_extended',
+ 'iterate',
+ 'iter_suppress',
+ 'last',
+ 'locate',
+ 'longest_common_prefix',
+ 'lstrip',
+ 'make_decorator',
+ 'map_except',
+ 'map_if',
+ 'map_reduce',
+ 'mark_ends',
+ 'minmax',
+ 'nth_or_last',
+ 'nth_permutation',
+ 'nth_product',
+ 'nth_combination_with_replacement',
+ 'numeric_range',
+ 'one',
+ 'only',
+ 'outer_product',
+ 'padded',
+ 'partial_product',
+ 'partitions',
+ 'peekable',
+ 'permutation_index',
+ 'product_index',
+ 'raise_',
+ 'repeat_each',
+ 'repeat_last',
+ 'replace',
+ 'rlocate',
+ 'rstrip',
+ 'run_length',
+ 'sample',
+ 'seekable',
+ 'set_partitions',
+ 'side_effect',
+ 'sliced',
+ 'sort_together',
+ 'split_after',
+ 'split_at',
+ 'split_before',
+ 'split_into',
+ 'split_when',
+ 'spy',
+ 'stagger',
+ 'strip',
+ 'strictly_n',
+ 'substrings',
+ 'substrings_indexes',
+ 'takewhile_inclusive',
+ 'time_limited',
+ 'unique_in_window',
+ 'unique_to_each',
+ 'unzip',
+ 'value_chain',
+ 'windowed',
+ 'windowed_complete',
+ 'with_iter',
+ 'zip_broadcast',
+ 'zip_equal',
+ 'zip_offset',
+]
+
+
+def chunked(iterable, n, strict=False):
+ """Break *iterable* into lists of length *n*:
+
+ >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
+ [[1, 2, 3], [4, 5, 6]]
+
+    By default, the last yielded list will have fewer than *n* elements
+ if the length of *iterable* is not divisible by *n*:
+
+ >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
+ [[1, 2, 3], [4, 5, 6], [7, 8]]
+
+ To use a fill-in value instead, see the :func:`grouper` recipe.
+
+ If the length of *iterable* is not divisible by *n* and *strict* is
+ ``True``, then ``ValueError`` will be raised before the last
+ list is yielded.
+
+ """
+ iterator = iter(partial(take, n, iter(iterable)), [])
+ if strict:
+ if n is None:
+ raise ValueError('n must not be None when using strict mode.')
+
+ def ret():
+ for chunk in iterator:
+ if len(chunk) != n:
+ raise ValueError('iterable is not divisible by n.')
+ yield chunk
+
+ return iter(ret())
+ else:
+ return iterator
+
+
+def first(iterable, default=_marker):
+ """Return the first item of *iterable*, or *default* if *iterable* is
+ empty.
+
+ >>> first([0, 1, 2, 3])
+ 0
+ >>> first([], 'some default')
+ 'some default'
+
+ If *default* is not provided and there are no items in the iterable,
+ raise ``ValueError``.
+
+ :func:`first` is useful when you have a generator of expensive-to-retrieve
+ values and want any arbitrary one. It is marginally shorter than
+ ``next(iter(iterable), default)``.
+
+ """
+ for item in iterable:
+ return item
+ if default is _marker:
+ raise ValueError(
+ 'first() was called on an empty iterable, and no '
+ 'default value was provided.'
+ )
+ return default
+
+
+def last(iterable, default=_marker):
+ """Return the last item of *iterable*, or *default* if *iterable* is
+ empty.
+
+ >>> last([0, 1, 2, 3])
+ 3
+ >>> last([], 'some default')
+ 'some default'
+
+ If *default* is not provided and there are no items in the iterable,
+ raise ``ValueError``.
+ """
+ try:
+ if isinstance(iterable, Sequence):
+ return iterable[-1]
+ # Work around https://bugs.python.org/issue38525
+ elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
+ return next(reversed(iterable))
+ else:
+ return deque(iterable, maxlen=1)[-1]
+ except (IndexError, TypeError, StopIteration):
+ if default is _marker:
+ raise ValueError(
+ 'last() was called on an empty iterable, and no default was '
+ 'provided.'
+ )
+ return default
+
+
+def nth_or_last(iterable, n, default=_marker):
+ """Return the nth or the last item of *iterable*,
+ or *default* if *iterable* is empty.
+
+ >>> nth_or_last([0, 1, 2, 3], 2)
+ 2
+ >>> nth_or_last([0, 1], 2)
+ 1
+ >>> nth_or_last([], 0, 'some default')
+ 'some default'
+
+ If *default* is not provided and there are no items in the iterable,
+ raise ``ValueError``.
+ """
+ return last(islice(iterable, n + 1), default=default)
+
+
+class peekable:
+ """Wrap an iterator to allow lookahead and prepending elements.
+
+ Call :meth:`peek` on the result to get the value that will be returned
+ by :func:`next`. This won't advance the iterator:
+
+ >>> p = peekable(['a', 'b'])
+ >>> p.peek()
+ 'a'
+ >>> next(p)
+ 'a'
+
+ Pass :meth:`peek` a default value to return that instead of raising
+ ``StopIteration`` when the iterator is exhausted.
+
+ >>> p = peekable([])
+ >>> p.peek('hi')
+ 'hi'
+
+ peekables also offer a :meth:`prepend` method, which "inserts" items
+ at the head of the iterable:
+
+ >>> p = peekable([1, 2, 3])
+ >>> p.prepend(10, 11, 12)
+ >>> next(p)
+ 10
+ >>> p.peek()
+ 11
+ >>> list(p)
+ [11, 12, 1, 2, 3]
+
+ peekables can be indexed. Index 0 is the item that will be returned by
+ :func:`next`, index 1 is the item after that, and so on:
+ The values up to the given index will be cached.
+
+ >>> p = peekable(['a', 'b', 'c', 'd'])
+ >>> p[0]
+ 'a'
+ >>> p[1]
+ 'b'
+ >>> next(p)
+ 'a'
+
+ Negative indexes are supported, but be aware that they will cache the
+ remaining items in the source iterator, which may require significant
+ storage.
+
+ To check whether a peekable is exhausted, check its truth value:
+
+ >>> p = peekable(['a', 'b'])
+ >>> if p: # peekable has items
+ ... list(p)
+ ['a', 'b']
+ >>> if not p: # peekable is exhausted
+ ... list(p)
+ []
+
+ """
+
+ def __init__(self, iterable):
+ self._it = iter(iterable)
+ self._cache = deque()
+
+ def __iter__(self):
+ return self
+
+ def __bool__(self):
+ try:
+ self.peek()
+ except StopIteration:
+ return False
+ return True
+
+ def peek(self, default=_marker):
+ """Return the item that will be next returned from ``next()``.
+
+ Return ``default`` if there are no items left. If ``default`` is not
+ provided, raise ``StopIteration``.
+
+ """
+ if not self._cache:
+ try:
+ self._cache.append(next(self._it))
+ except StopIteration:
+ if default is _marker:
+ raise
+ return default
+ return self._cache[0]
+
+ def prepend(self, *items):
+ """Stack up items to be the next ones returned from ``next()`` or
+ ``self.peek()``. The items will be returned in
+ first in, first out order::
+
+ >>> p = peekable([1, 2, 3])
+ >>> p.prepend(10, 11, 12)
+ >>> next(p)
+ 10
+ >>> list(p)
+ [11, 12, 1, 2, 3]
+
+ It is possible, by prepending items, to "resurrect" a peekable that
+ previously raised ``StopIteration``.
+
+ >>> p = peekable([])
+ >>> next(p)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+ >>> p.prepend(1)
+ >>> next(p)
+ 1
+ >>> next(p)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+
+ """
+ self._cache.extendleft(reversed(items))
+
+ def __next__(self):
+ if self._cache:
+ return self._cache.popleft()
+
+ return next(self._it)
+
+ def _get_slice(self, index):
+ # Normalize the slice's arguments
+ step = 1 if (index.step is None) else index.step
+ if step > 0:
+ start = 0 if (index.start is None) else index.start
+ stop = maxsize if (index.stop is None) else index.stop
+ elif step < 0:
+ start = -1 if (index.start is None) else index.start
+ stop = (-maxsize - 1) if (index.stop is None) else index.stop
+ else:
+ raise ValueError('slice step cannot be zero')
+
+ # If either the start or stop index is negative, we'll need to cache
+ # the rest of the iterable in order to slice from the right side.
+ if (start < 0) or (stop < 0):
+ self._cache.extend(self._it)
+ # Otherwise we'll need to find the rightmost index and cache to that
+ # point.
+ else:
+ n = min(max(start, stop) + 1, maxsize)
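+ # e.g. p[1:4] computes n = max(1, 4) + 1 == 5, caches up to five
+ # items, and then slices the cached list below.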
+ cache_len = len(self._cache)
+ if n >= cache_len:
+ self._cache.extend(islice(self._it, n - cache_len))
+
+ return list(self._cache)[index]
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ return self._get_slice(index)
+
+ cache_len = len(self._cache)
+ if index < 0:
+ self._cache.extend(self._it)
+ elif index >= cache_len:
+ self._cache.extend(islice(self._it, index + 1 - cache_len))
+
+ return self._cache[index]
+
+
+def consumer(func):
+ """Decorator that automatically advances a PEP-342-style "reverse iterator"
+ to its first yield point so you don't have to call ``next()`` on it
+ manually.
+
+ >>> @consumer
+ ... def tally():
+ ... i = 0
+ ... while True:
+ ... print('Thing number %s is %s.' % (i, (yield)))
+ ... i += 1
+ ...
+ >>> t = tally()
+ >>> t.send('red')
+ Thing number 0 is red.
+ >>> t.send('fish')
+ Thing number 1 is fish.
+
+ Without the decorator, you would have to call ``next(t)`` before
+ ``t.send()`` could be used.
+
+ """
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ gen = func(*args, **kwargs)
+ next(gen)
+ return gen
+
+ return wrapper
+
+
+def ilen(iterable):
+ """Return the number of items in *iterable*.
+
+ >>> ilen(x for x in range(1000000) if x % 3 == 0)
+ 333334
+
+ This consumes the iterable, so handle with care.
+
+ """
+ # This approach was selected because benchmarks showed it's likely the
+ # fastest of the known implementations at the time of writing.
+ # See GitHub tracker: #236, #230.
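+ # zip() pairs each item with 0, 1, 2, ... from the counter; feeding the
+ # pairs into a zero-length deque consumes them without storing anything,
+ # so the counter's next value equals the number of items consumed.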
+ counter = count()
+ deque(zip(iterable, counter), maxlen=0)
+ return next(counter)
+
+
+def iterate(func, start):
+ """Return ``start``, ``func(start)``, ``func(func(start))``, ...
+
+ >>> from itertools import islice
+ >>> list(islice(iterate(lambda x: 2*x, 1), 10))
+ [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+ """
+ while True:
+ yield start
+ try:
+ start = func(start)
+ except StopIteration:
+ break
+
+
+def with_iter(context_manager):
+ """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
+
+ For example, this will close the file when the iterator is exhausted::
+
+ upper_lines = (line.upper() for line in with_iter(open('foo')))
+
+ Any context manager which returns an iterable is a candidate for
+ ``with_iter``.
+
+ """
+ with context_manager as iterable:
+ yield from iterable
+
+
+def one(iterable, too_short=None, too_long=None):
+ """Return the first item from *iterable*, which is expected to contain only
+ that item. Raise an exception if *iterable* is empty or has more than one
+ item.
+
+ :func:`one` is useful for ensuring that an iterable contains only one item.
+ For example, it can be used to retrieve the result of a database query
+ that is expected to return a single row.
+
+ If *iterable* is empty, ``ValueError`` will be raised. You may specify a
+ different exception with the *too_short* keyword:
+
+ >>> it = []
+ >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: too few items in iterable (expected 1)
+ >>> too_short = IndexError('too few items')
+ >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ IndexError: too few items
+
+ Similarly, if *iterable* contains more than one item, ``ValueError`` will
+ be raised. You may specify a different exception with the *too_long*
+ keyword:
+
+ >>> it = ['too', 'many']
+ >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: Expected exactly one item in iterable, but got 'too',
+ 'many', and perhaps more.
+ >>> too_long = RuntimeError
+ >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ RuntimeError
+
+ Note that :func:`one` attempts to advance *iterable* twice to ensure there
+ is only one item. See :func:`spy` or :func:`peekable` to check iterable
+ contents less destructively.
+
+ """
+ it = iter(iterable)
+
+ try:
+ first_value = next(it)
+ except StopIteration as e:
+ raise (
+ too_short or ValueError('too few items in iterable (expected 1)')
+ ) from e
+
+ try:
+ second_value = next(it)
+ except StopIteration:
+ pass
+ else:
+ msg = (
+ 'Expected exactly one item in iterable, but got {!r}, {!r}, '
+ 'and perhaps more.'.format(first_value, second_value)
+ )
+ raise too_long or ValueError(msg)
+
+ return first_value
+
+
+def raise_(exception, *args):
+ raise exception(*args)
+
+
+def strictly_n(iterable, n, too_short=None, too_long=None):
+ """Validate that *iterable* has exactly *n* items and return them if
+ it does. If it has fewer than *n* items, call function *too_short*
+ with those items. If it has more than *n* items, call function
+ *too_long* with the first ``n + 1`` items.
+
+ >>> iterable = ['a', 'b', 'c', 'd']
+ >>> n = 4
+ >>> list(strictly_n(iterable, n))
+ ['a', 'b', 'c', 'd']
+
+ Note that the returned iterable must be consumed in order for the check to
+ be made.
+
+ By default, *too_short* and *too_long* are functions that raise
+ ``ValueError``.
+
+ >>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: too few items in iterable (got 2)
+
+ >>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: too many items in iterable (got at least 3)
+
+ You can instead supply functions that do something else.
+ *too_short* will be called with the number of items in *iterable*.
+ *too_long* will be called with `n + 1`.
+
+ >>> def too_short(item_count):
+ ... raise RuntimeError
+ >>> it = strictly_n('abcd', 6, too_short=too_short)
+ >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ RuntimeError
+
+ >>> def too_long(item_count):
+ ... print('The boss is going to hear about this')
+ >>> it = strictly_n('abcdef', 4, too_long=too_long)
+ >>> list(it)
+ The boss is going to hear about this
+ ['a', 'b', 'c', 'd']
+
+ """
+ if too_short is None:
+ too_short = lambda item_count: raise_(
+ ValueError,
+ 'Too few items in iterable (got {})'.format(item_count),
+ )
+
+ if too_long is None:
+ too_long = lambda item_count: raise_(
+ ValueError,
+ 'Too many items in iterable (got at least {})'.format(item_count),
+ )
+
+ it = iter(iterable)
+ for i in range(n):
+ try:
+ item = next(it)
+ except StopIteration:
+ too_short(i)
+ return
+ else:
+ yield item
+
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ else:
+ too_long(n + 1)
+
+
+def distinct_permutations(iterable, r=None):
+ """Yield successive distinct permutations of the elements in *iterable*.
+
+ >>> sorted(distinct_permutations([1, 0, 1]))
+ [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
+
+ Equivalent to ``set(permutations(iterable))``, except duplicates are not
+ generated and thrown away. For larger input sequences this is much more
+ efficient.
+
+ Duplicate permutations arise when there are duplicated elements in the
+ input iterable. The number of items returned is
+ `n! / (x_1! * x_2! * ... * x_k!)`, where `n` is the total number of
+ items input, `k` is the number of distinct items, and each `x_i` is the
+ count of the `i`-th distinct item in the input sequence.
+
+ If *r* is given, only the *r*-length permutations are yielded.
+
+ >>> sorted(distinct_permutations([1, 0, 1], r=2))
+ [(0, 1), (1, 0), (1, 1)]
+ >>> sorted(distinct_permutations(range(3), r=2))
+ [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
+
+ """
+
+ # Algorithm: https://w.wiki/Qai
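+ # Because the input is sorted, repeatedly stepping to the next
+ # lexicographic permutation visits each distinct ordering exactly once.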
+ def _full(A):
+ while True:
+ # Yield the permutation we have
+ yield tuple(A)
+
+ # Find the largest index i such that A[i] < A[i + 1]
+ for i in range(size - 2, -1, -1):
+ if A[i] < A[i + 1]:
+ break
+ # If no such index exists, this permutation is the last one
+ else:
+ return
+
+ # Find the largest index j greater than i such that A[i] < A[j]
+ for j in range(size - 1, i, -1):
+ if A[i] < A[j]:
+ break
+
+ # Swap the value of A[i] with that of A[j], then reverse the
+ # sequence from A[i + 1] to form the new permutation
+ A[i], A[j] = A[j], A[i]
+ A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
+
+ # Algorithm: modified from the above
+ def _partial(A, r):
+ # Split A into the first r items and the last r items
+ head, tail = A[:r], A[r:]
+ right_head_indexes = range(r - 1, -1, -1)
+ left_tail_indexes = range(len(tail))
+
+ while True:
+ # Yield the permutation we have
+ yield tuple(head)
+
+ # Starting from the right, find the first index of the head with
+ # value smaller than the maximum value of the tail - call it i.
+ pivot = tail[-1]
+ for i in right_head_indexes:
+ if head[i] < pivot:
+ break
+ pivot = head[i]
+ else:
+ return
+
+ # Starting from the left, find the first value of the tail
+ # with a value greater than head[i] and swap.
+ for j in left_tail_indexes:
+ if tail[j] > head[i]:
+ head[i], tail[j] = tail[j], head[i]
+ break
+ # If we didn't find one, start from the right and find the first
+ # index of the head with a value greater than head[i] and swap.
+ else:
+ for j in right_head_indexes:
+ if head[j] > head[i]:
+ head[i], head[j] = head[j], head[i]
+ break
+
+ # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
+ tail += head[: i - r : -1] # head[i + 1:][::-1]
+ i += 1
+ head[i:], tail[:] = tail[: r - i], tail[r - i :]
+
+ items = sorted(iterable)
+
+ size = len(items)
+ if r is None:
+ r = size
+
+ if 0 < r <= size:
+ return _full(items) if (r == size) else _partial(items, r)
+
+ return iter(() if r else ((),))
+
+
+def intersperse(e, iterable, n=1):
+ """Intersperse filler element *e* among the items in *iterable*, leaving
+ *n* items between each filler element.
+
+ >>> list(intersperse('!', [1, 2, 3, 4, 5]))
+ [1, '!', 2, '!', 3, '!', 4, '!', 5]
+
+ >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
+ [1, 2, None, 3, 4, None, 5]
+
+ """
+ if n == 0:
+ raise ValueError('n must be > 0')
+ elif n == 1:
+ # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
+ # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
+ return islice(interleave(repeat(e), iterable), 1, None)
+ else:
+ # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
+ # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
+ # flatten(...) -> x_0, x_1, e, x_2, x_3...
+ filler = repeat([e])
+ chunks = chunked(iterable, n)
+ return flatten(islice(interleave(filler, chunks), 1, None))
+
+
+def unique_to_each(*iterables):
+ """Return the elements from each of the input iterables that aren't in the
+ other input iterables.
+
+ For example, suppose you have a set of packages, each with a set of
+ dependencies::
+
+ {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
+
+ If you remove one package, which dependencies can also be removed?
+
+ If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
+ associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
+ ``pkg_2``, and ``D`` is only needed for ``pkg_3``::
+
+ >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
+ [['A'], ['C'], ['D']]
+
+ If there are duplicates in one input iterable that aren't in the others
+ they will be duplicated in the output. Input order is preserved::
+
+ >>> unique_to_each("mississippi", "missouri")
+ [['p', 'p'], ['o', 'u', 'r']]
+
+ It is assumed that the elements of each iterable are hashable.
+
+ """
+ pool = [list(it) for it in iterables]
+ counts = Counter(chain.from_iterable(map(set, pool)))
+ uniques = {element for element in counts if counts[element] == 1}
+ return [list(filter(uniques.__contains__, it)) for it in pool]
+
+
+def windowed(seq, n, fillvalue=None, step=1):
+ """Return a sliding window of width *n* over the given iterable.
+
+ >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
+ >>> list(all_windows)
+ [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
+
+ When the window is larger than the iterable, *fillvalue* is used in place
+ of missing values:
+
+ >>> list(windowed([1, 2, 3], 4))
+ [(1, 2, 3, None)]
+
+ Each window will advance in increments of *step*:
+
+ >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
+ [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
+
+ To slide into the iterable's items, use :func:`chain` to add filler items
+ to the left:
+
+ >>> iterable = [1, 2, 3, 4]
+ >>> n = 3
+ >>> padding = [None] * (n - 1)
+ >>> list(windowed(chain(padding, iterable), 3))
+ [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
+ """
+ if n < 0:
+ raise ValueError('n must be >= 0')
+ if n == 0:
+ yield tuple()
+ return
+ if step < 1:
+ raise ValueError('step must be >= 1')
+
+ window = deque(maxlen=n)
+ i = n
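+ # i counts appends until the next yield: the first window is full after
+ # n appends, then one window is yielded every *step* appends.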
+ for _ in map(window.append, seq):
+ i -= 1
+ if not i:
+ i = step
+ yield tuple(window)
+
+ size = len(window)
+ if size == 0:
+ return
+ elif size < n:
+ yield tuple(chain(window, repeat(fillvalue, n - size)))
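+ # If the input ended partway toward the next scheduled yield, i is the
+ # shortfall; pad with that many fillvalues and emit one last window
+ # (skipped when the padding would displace every real item).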
+ elif 0 < i < min(step, n):
+ window += (fillvalue,) * i
+ yield tuple(window)
+
+
+def substrings(iterable):
+ """Yield all of the substrings of *iterable*.
+
+ >>> [''.join(s) for s in substrings('more')]
+ ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
+
+ Note that non-string iterables can also be subdivided.
+
+ >>> list(substrings([0, 1, 2]))
+ [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
+
+ """
+ # The length-1 substrings
+ seq = []
+ for item in iter(iterable):
+ seq.append(item)
+ yield (item,)
+ seq = tuple(seq)
+ item_count = len(seq)
+
+ # And the rest
+ for n in range(2, item_count + 1):
+ for i in range(item_count - n + 1):
+ yield seq[i : i + n]
+
+
+def substrings_indexes(seq, reverse=False):
+ """Yield all substrings and their positions in *seq*
+
+ The items yielded will be a tuple of the form ``(substr, i, j)``, where
+ ``substr == seq[i:j]``.
+
+ This function only works for iterables that support slicing, such as
+ ``str`` objects.
+
+ >>> for item in substrings_indexes('more'):
+ ... print(item)
+ ('m', 0, 1)
+ ('o', 1, 2)
+ ('r', 2, 3)
+ ('e', 3, 4)
+ ('mo', 0, 2)
+ ('or', 1, 3)
+ ('re', 2, 4)
+ ('mor', 0, 3)
+ ('ore', 1, 4)
+ ('more', 0, 4)
+
+ Set *reverse* to ``True`` to yield the same items in the opposite order.
+
+
+ """
+ r = range(1, len(seq) + 1)
+ if reverse:
+ r = reversed(r)
+ return (
+ (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
+ )
+
+
+class bucket:
+ """Wrap *iterable* and return an object that buckets the iterable into
+ child iterables based on a *key* function.
+
+ >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
+ >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
+ >>> sorted(list(s)) # Get the keys
+ ['a', 'b', 'c']
+ >>> a_iterable = s['a']
+ >>> next(a_iterable)
+ 'a1'
+ >>> next(a_iterable)
+ 'a2'
+ >>> list(s['b'])
+ ['b1', 'b2', 'b3']
+
+ The original iterable will be advanced and its items will be cached until
+ they are used by the child iterables. This may require significant storage.
+
+ By default, attempting to select a bucket to which no items belong will
+ exhaust the iterable and cache all values.
+ If you specify a *validator* function, selected buckets will instead be
+ checked against it.
+
+ >>> from itertools import count
+ >>> it = count(1, 2) # Infinite sequence of odd numbers
+ >>> key = lambda x: x % 10 # Bucket by last digit
+ >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
+ >>> s = bucket(it, key=key, validator=validator)
+ >>> 2 in s
+ False
+ >>> list(s[2])
+ []
+
+ """
+
+ def __init__(self, iterable, key, validator=None):
+ self._it = iter(iterable)
+ self._key = key
+ self._cache = defaultdict(deque)
+ self._validator = validator or (lambda x: True)
+
+ def __contains__(self, value):
+ if not self._validator(value):
+ return False
+
+ try:
+ item = next(self[value])
+ except StopIteration:
+ return False
+ else:
+ self._cache[value].appendleft(item)
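+ # Put the item back at the front of its cache so the membership
+ # test does not consume it.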
+
+ return True
+
+ def _get_values(self, value):
+ """
+ Helper to yield items from the parent iterator that match *value*.
+ Items that don't match are stored in the local cache as they
+ are encountered.
+ """
+ while True:
+ # If we've cached some items that match the target value, emit
+ # the first one and evict it from the cache.
+ if self._cache[value]:
+ yield self._cache[value].popleft()
+ # Otherwise we need to advance the parent iterator to search for
+ # a matching item, caching the rest.
+ else:
+ while True:
+ try:
+ item = next(self._it)
+ except StopIteration:
+ return
+ item_value = self._key(item)
+ if item_value == value:
+ yield item
+ break
+ elif self._validator(item_value):
+ self._cache[item_value].append(item)
+
+ def __iter__(self):
+ for item in self._it:
+ item_value = self._key(item)
+ if self._validator(item_value):
+ self._cache[item_value].append(item)
+
+ yield from self._cache.keys()
+
+ def __getitem__(self, value):
+ if not self._validator(value):
+ return iter(())
+
+ return self._get_values(value)
+
+
+def spy(iterable, n=1):
+ """Return a 2-tuple with a list containing the first *n* elements of
+ *iterable*, and an iterator with the same items as *iterable*.
+ This allows you to "look ahead" at the items in the iterable without
+ advancing it.
+
+ There is one item in the list by default:
+
+ >>> iterable = 'abcdefg'
+ >>> head, iterable = spy(iterable)
+ >>> head
+ ['a']
+ >>> list(iterable)
+ ['a', 'b', 'c', 'd', 'e', 'f', 'g']
+
+ You may use unpacking to retrieve items instead of lists:
+
+ >>> (head,), iterable = spy('abcdefg')
+ >>> head
+ 'a'
+ >>> (first, second), iterable = spy('abcdefg', 2)
+ >>> first
+ 'a'
+ >>> second
+ 'b'
+
+ The number of items requested can be larger than the number of items in
+ the iterable:
+
+ >>> iterable = [1, 2, 3, 4, 5]
+ >>> head, iterable = spy(iterable, 10)
+ >>> head
+ [1, 2, 3, 4, 5]
+ >>> list(iterable)
+ [1, 2, 3, 4, 5]
+
+ """
+ it = iter(iterable)
+ head = take(n, it)
+
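+ # Return a copy of head so that mutating the returned list does not
+ # alter what the chained iterator yields.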
+ return head.copy(), chain(head, it)
+
+
+def interleave(*iterables):
+ """Return a new iterable yielding from each iterable in turn,
+ until the shortest is exhausted.
+
+ >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
+ [1, 4, 6, 2, 5, 7]
+
+ For a version that doesn't terminate after the shortest iterable is
+ exhausted, see :func:`interleave_longest`.
+
+ """
+ return chain.from_iterable(zip(*iterables))
+
+
+def interleave_longest(*iterables):
+ """Return a new iterable yielding from each iterable in turn,
+ skipping any that are exhausted.
+
+ >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
+ [1, 4, 6, 2, 5, 7, 3, 8]
+
+ This function produces the same output as :func:`roundrobin`, but may
+ perform better for some inputs (in particular when the number of iterables
+ is large).
+
+ """
+ i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
+ return (x for x in i if x is not _marker)
+
+
+def interleave_evenly(iterables, lengths=None):
+ """
+ Interleave multiple iterables so that their elements are evenly distributed
+ throughout the output sequence.
+
+ >>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
+ >>> list(interleave_evenly(iterables))
+ [1, 2, 'a', 3, 4, 'b', 5]
+
+ >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
+ >>> list(interleave_evenly(iterables))
+ [1, 6, 4, 2, 7, 3, 8, 5]
+
+ This function requires iterables of known length. Iterables without
+ ``__len__()`` can be used by manually specifying lengths with *lengths*:
+
+ >>> from itertools import combinations, repeat
+ >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
+ >>> lengths = [4 * (4 - 1) // 2, 3]
+ >>> list(interleave_evenly(iterables, lengths=lengths))
+ [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
+
+ Based on Bresenham's algorithm.
+ """
+ if lengths is None:
+ try:
+ lengths = [len(it) for it in iterables]
+ except TypeError:
+ raise ValueError(
+ 'Iterable lengths could not be determined automatically. '
+ 'Specify them with the lengths keyword.'
+ )
+ elif len(iterables) != len(lengths):
+ raise ValueError('Mismatching number of iterables and lengths.')
+
+ dims = len(lengths)
+
+ # sort iterables by length, descending
+ lengths_permute = sorted(
+ range(dims), key=lambda i: lengths[i], reverse=True
+ )
+ lengths_desc = [lengths[i] for i in lengths_permute]
+ iters_desc = [iter(iterables[i]) for i in lengths_permute]
+
+ # the longest iterable is the primary one (Bresenham: the longest
+ # distance along an axis)
+ delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
+ iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
+ errors = [delta_primary // dims] * len(deltas_secondary)
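+ # e.g. with iterables of lengths 5 and 2 (as in the docstring), errors
+ # starts at [5 // 2] == [2] and the loop below yields a secondary item
+ # each time its error dips below zero: 1, 2, 'a', 3, 4, 'b', 5.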
+
+ to_yield = sum(lengths)
+ while to_yield:
+ yield next(iter_primary)
+ to_yield -= 1
+ # update errors for each secondary iterable
+ errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
+
+ # those iterables for which the error is negative are yielded
+ # ("diagonal step" in Bresenham)
+ for i, e in enumerate(errors):
+ if e < 0:
+ yield next(iters_secondary[i])
+ to_yield -= 1
+ errors[i] += delta_primary
+
+
+def collapse(iterable, base_type=None, levels=None):
+ """Flatten an iterable with multiple levels of nesting (e.g., a list of
+ lists of tuples) into non-iterable types.
+
+ >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
+ >>> list(collapse(iterable))
+ [1, 2, 3, 4, 5, 6]
+
+ Binary and text strings are not considered iterable and
+ will not be collapsed.
+
+ To avoid collapsing other types, specify *base_type*:
+
+ >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
+ >>> list(collapse(iterable, base_type=tuple))
+ ['ab', ('cd', 'ef'), 'gh', 'ij']
+
+ Specify *levels* to stop flattening after a certain level:
+
+ >>> iterable = [('a', ['b']), ('c', ['d'])]
+ >>> list(collapse(iterable)) # Fully flattened
+ ['a', 'b', 'c', 'd']
+ >>> list(collapse(iterable, levels=1)) # Only one level flattened
+ ['a', ['b'], 'c', ['d']]
+
+ """
+
+ def walk(node, level):
+ if (
+ ((levels is not None) and (level > levels))
+ or isinstance(node, (str, bytes))
+ or ((base_type is not None) and isinstance(node, base_type))
+ ):
+ yield node
+ return
+
+ try:
+ tree = iter(node)
+ except TypeError:
+ yield node
+ return
+ else:
+ for child in tree:
+ yield from walk(child, level + 1)
+
+ yield from walk(iterable, 0)
+
+
+def side_effect(func, iterable, chunk_size=None, before=None, after=None):
+ """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
+ of items) before yielding the item.
+
+ `func` must be a function that takes a single argument. Its return value
+ will be discarded.
+
+ *before* and *after* are optional functions that take no arguments. They
+ will be executed before iteration starts and after it ends, respectively.
+
+ `side_effect` can be used for logging, updating progress bars, or anything
+ that is not functionally "pure."
+
+ Emitting a status message:
+
+ >>> from more_itertools import consume
+ >>> func = lambda item: print('Received {}'.format(item))
+ >>> consume(side_effect(func, range(2)))
+ Received 0
+ Received 1
+
+ Operating on chunks of items:
+
+ >>> pair_sums = []
+ >>> func = lambda chunk: pair_sums.append(sum(chunk))
+ >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
+ [0, 1, 2, 3, 4, 5]
+ >>> list(pair_sums)
+ [1, 5, 9]
+
+ Writing to a file-like object:
+
+ >>> from io import StringIO
+ >>> from more_itertools import consume
+ >>> f = StringIO()
+ >>> func = lambda x: print(x, file=f)
+ >>> before = lambda: print(u'HEADER', file=f)
+ >>> after = f.close
+ >>> it = [u'a', u'b', u'c']
+ >>> consume(side_effect(func, it, before=before, after=after))
+ >>> f.closed
+ True
+
+ """
+ try:
+ if before is not None:
+ before()
+
+ if chunk_size is None:
+ for item in iterable:
+ func(item)
+ yield item
+ else:
+ for chunk in chunked(iterable, chunk_size):
+ func(chunk)
+ yield from chunk
+ finally:
+ if after is not None:
+ after()
+
+
+def sliced(seq, n, strict=False):
+ """Yield slices of length *n* from the sequence *seq*.
+
+ >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
+ [(1, 2, 3), (4, 5, 6)]
+
+ By default, the last yielded slice will have fewer than *n* elements
+ if the length of *seq* is not divisible by *n*:
+
+ >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
+ [(1, 2, 3), (4, 5, 6), (7, 8)]
+
+ If the length of *seq* is not divisible by *n* and *strict* is
+ ``True``, then ``ValueError`` will be raised before the last
+ slice is yielded.
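+
+ For example:
+
+ >>> list(sliced((1, 2, 3, 4), 3, strict=True)) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: seq is not divisible by n.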
+
+ This function will only work for iterables that support slicing.
+ For non-sliceable iterables, see :func:`chunked`.
+
+ """
+ iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
+ if strict:
+
+ def ret():
+ for _slice in iterator:
+ if len(_slice) != n:
+ raise ValueError("seq is not divisible by n.")
+ yield _slice
+
+ return iter(ret())
+ else:
+ return iterator
+
+
+def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
+ """Yield lists of items from *iterable*, where each list is delimited by
+ an item where callable *pred* returns ``True``.
+
+ >>> list(split_at('abcdcba', lambda x: x == 'b'))
+ [['a'], ['c', 'd', 'c'], ['a']]
+
+ >>> list(split_at(range(10), lambda n: n % 2 == 1))
+ [[0], [2], [4], [6], [8], []]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
+ [[0], [2], [4, 5, 6, 7, 8, 9]]
+
+ By default, the delimiting items are not included in the output.
+ To include them, set *keep_separator* to ``True``.
+
+ >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
+ [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
+
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ buf = []
+ it = iter(iterable)
+ for item in it:
+ if pred(item):
+ yield buf
+ if keep_separator:
+ yield [item]
+ if maxsplit == 1:
+ yield list(it)
+ return
+ buf = []
+ maxsplit -= 1
+ else:
+ buf.append(item)
+ yield buf
+
+
+def split_before(iterable, pred, maxsplit=-1):
+ """Yield lists of items from *iterable*, where each list ends just before
+ an item for which callable *pred* returns ``True``:
+
+ >>> list(split_before('OneTwo', lambda s: s.isupper()))
+ [['O', 'n', 'e'], ['T', 'w', 'o']]
+
+ >>> list(split_before(range(10), lambda n: n % 3 == 0))
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ buf = []
+ it = iter(iterable)
+ for item in it:
+ if pred(item) and buf:
+ yield buf
+ if maxsplit == 1:
+ yield [item] + list(it)
+ return
+ buf = []
+ maxsplit -= 1
+ buf.append(item)
+ if buf:
+ yield buf
+
+
+def split_after(iterable, pred, maxsplit=-1):
+ """Yield lists of items from *iterable*, where each list ends with an
+ item where callable *pred* returns ``True``:
+
+ >>> list(split_after('one1two2', lambda s: s.isdigit()))
+ [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
+
+ >>> list(split_after(range(10), lambda n: n % 3 == 0))
+ [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
+ [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
+
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ buf = []
+ it = iter(iterable)
+ for item in it:
+ buf.append(item)
+ if pred(item) and buf:
+ yield buf
+ if maxsplit == 1:
+ buf = list(it)
+ if buf:
+ yield buf
+ return
+ buf = []
+ maxsplit -= 1
+ if buf:
+ yield buf
+
+
+def split_when(iterable, pred, maxsplit=-1):
+ """Split *iterable* into pieces based on the output of *pred*.
+ *pred* should be a function that takes successive pairs of items and
+ returns ``True`` if the iterable should be split in between them.
+
+ For example, to find runs of increasing numbers, split the iterable when
+ element ``i`` is larger than element ``i + 1``:
+
+ >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
+ [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
+ ... lambda x, y: x > y, maxsplit=2))
+ [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
+
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ it = iter(iterable)
+ try:
+ cur_item = next(it)
+ except StopIteration:
+ return
+
+ buf = [cur_item]
+ for next_item in it:
+ if pred(cur_item, next_item):
+ yield buf
+ if maxsplit == 1:
+ yield [next_item] + list(it)
+ return
+ buf = []
+ maxsplit -= 1
+
+ buf.append(next_item)
+ cur_item = next_item
+
+ yield buf
+
+
+def split_into(iterable, sizes):
+ """Yield a list of sequential items from *iterable* of length 'n' for each
+ integer 'n' in *sizes*.
+
+ >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
+ [[1], [2, 3], [4, 5, 6]]
+
+ If the sum of *sizes* is smaller than the length of *iterable*, then the
+ remaining items of *iterable* will not be returned.
+
+ >>> list(split_into([1,2,3,4,5,6], [2,3]))
+ [[1, 2], [3, 4, 5]]
+
+ If the sum of *sizes* is larger than the length of *iterable*, fewer items
+ will be returned in the iteration that overruns *iterable* and further
+ lists will be empty:
+
+ >>> list(split_into([1,2,3,4], [1,2,3,4]))
+ [[1], [2, 3], [4], []]
+
+ When a ``None`` object is encountered in *sizes*, the returned list will
+ contain items up to the end of *iterable*, the same way that
+ :func:`itertools.islice` does:
+
+ >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
+ [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
+
+ :func:`split_into` can be useful for grouping a series of items where the
+ sizes of the groups are not uniform. An example would be a table row in
+ which multiple columns represent elements of the same feature (e.g. a
+ point represented by x, y, z) but the format is not the same for all
+ columns.
+ """
+ # convert the iterable argument into an iterator so its contents can
+ # be consumed by islice in case it is a generator
+ it = iter(iterable)
+
+ for size in sizes:
+ if size is None:
+ yield list(it)
+ return
+ else:
+ yield list(islice(it, size))
+
+
+def padded(iterable, fillvalue=None, n=None, next_multiple=False):
+ """Yield the elements from *iterable*, followed by *fillvalue*, such that
+ at least *n* items are emitted.
+
+ >>> list(padded([1, 2, 3], '?', 5))
+ [1, 2, 3, '?', '?']
+
+ If *next_multiple* is ``True``, *fillvalue* will be emitted until the
+ number of items emitted is a multiple of *n*::
+
+ >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
+ [1, 2, 3, 4, None, None]
+
+ If *n* is ``None``, *fillvalue* will be emitted indefinitely.
+
+ """
+ it = iter(iterable)
+ if n is None:
+ yield from chain(it, repeat(fillvalue))
+ elif n < 1:
+ raise ValueError('n must be at least 1')
+ else:
+ item_count = 0
+ for item in it:
+ yield item
+ item_count += 1
+
+ remaining = (n - item_count) % n if next_multiple else n - item_count
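+ # e.g. 4 items with n=3 and next_multiple: (3 - 4) % 3 == 2, so two
+ # fillvalues are emitted; without next_multiple the difference is
+ # negative and range() yields nothing.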
+ for _ in range(remaining):
+ yield fillvalue
+
+
+def repeat_each(iterable, n=2):
+ """Repeat each element in *iterable* *n* times.
+
+ >>> list(repeat_each('ABC', 3))
+ ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
+ """
+ return chain.from_iterable(map(repeat, iterable, repeat(n)))
+
+
+def repeat_last(iterable, default=None):
+ """After the *iterable* is exhausted, keep yielding its last element.
+
+ >>> list(islice(repeat_last(range(3)), 5))
+ [0, 1, 2, 2, 2]
+
+ If the iterable is empty, yield *default* forever::
+
+ >>> list(islice(repeat_last(range(0), 42), 5))
+ [42, 42, 42, 42, 42]
+
+ """
+ item = _marker
+ for item in iterable:
+ yield item
+ final = default if item is _marker else item
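+ # item is still _marker only if the loop never ran (empty iterable),
+ # in which case *default* is repeated instead.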
+ yield from repeat(final)
+
+
+def distribute(n, iterable):
+ """Distribute the items from *iterable* among *n* smaller iterables.
+
+ >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
+ >>> list(group_1)
+ [1, 3, 5]
+ >>> list(group_2)
+ [2, 4, 6]
+
+ If the length of *iterable* is not evenly divisible by *n*, then the
+ length of the returned iterables will not be identical:
+
+ >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
+ >>> [list(c) for c in children]
+ [[1, 4, 7], [2, 5], [3, 6]]
+
+ If the length of *iterable* is smaller than *n*, then the last returned
+ iterables will be empty:
+
+ >>> children = distribute(5, [1, 2, 3])
+ >>> [list(c) for c in children]
+ [[1], [2], [3], [], []]
+
+ This function uses :func:`itertools.tee` and may require significant
+ storage. If you need the order of items in the smaller iterables to
+ match the original iterable, see :func:`divide`.
+
+ """
+ if n < 1:
+ raise ValueError('n must be at least 1')
+
+ children = tee(iterable, n)
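+ # Each tee'd copy starts at a different offset and takes every n-th
+ # item, e.g. with n=2 the children yield items 0, 2, 4, ... and
+ # 1, 3, 5, ... respectively.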
+ return [islice(it, index, None, n) for index, it in enumerate(children)]
+
+
+def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
+ """Yield tuples whose elements are offset from *iterable*.
+ The amount by which the `i`-th item in each tuple is offset is given by
+ the `i`-th item in *offsets*.
+
+ >>> list(stagger([0, 1, 2, 3]))
+ [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
+ >>> list(stagger(range(8), offsets=(0, 2, 4)))
+ [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
+
+ By default, the sequence will end when the final element of a tuple is the
+ last item in the iterable. To continue until the first element of a tuple
+ is the last item in the iterable, set *longest* to ``True``::
+
+ >>> list(stagger([0, 1, 2, 3], longest=True))
+ [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
+
+ By default, ``None`` will be used to replace offsets beyond the end of the
+ sequence. Specify *fillvalue* to use some other value.
+
+ """
+ children = tee(iterable, len(offsets))
+
+ return zip_offset(
+ *children, offsets=offsets, longest=longest, fillvalue=fillvalue
+ )
+
+
+def zip_equal(*iterables):
+ """``zip`` the input *iterables* together, but raise
+ ``UnequalIterablesError`` if they aren't all the same length.
+
+ >>> it_1 = range(3)
+ >>> it_2 = iter('abc')
+ >>> list(zip_equal(it_1, it_2))
+ [(0, 'a'), (1, 'b'), (2, 'c')]
+
+ >>> it_1 = range(3)
+ >>> it_2 = iter('abcd')
+ >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ more_itertools.more.UnequalIterablesError: Iterables have different
+ lengths
+
+ """
+ if hexversion >= 0x30A00A6:
+ warnings.warn(
+ (
+ 'zip_equal will be removed in a future version of '
+ 'more-itertools. Use the builtin zip function with '
+ 'strict=True instead.'
+ ),
+ DeprecationWarning,
+ )
+
+ return _zip_equal(*iterables)
+
+
+def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
+ """``zip`` the input *iterables* together, but offset the `i`-th iterable
+ by the `i`-th item in *offsets*.
+
+ >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
+ [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
+
+ This can be used as a lightweight alternative to SciPy or pandas to analyze
+ data sets in which some series have a lead or lag relationship.
+
+ By default, the sequence will end when the shortest iterable is exhausted.
+ To continue until the longest iterable is exhausted, set *longest* to
+ ``True``.
+
+ >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
+ [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
+
+ By default, ``None`` will be used to replace offsets beyond the end of the
+ sequence. Specify *fillvalue* to use some other value.
+
+ """
+ if len(iterables) != len(offsets):
+ raise ValueError("Number of iterables and offsets didn't match")
+
+ staggered = []
+ for it, n in zip(iterables, offsets):
+ if n < 0:
+ staggered.append(chain(repeat(fillvalue, -n), it))
+ elif n > 0:
+ staggered.append(islice(it, n, None))
+ else:
+ staggered.append(it)
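+ # A negative offset pads the front with *fillvalue* (delaying the
+ # iterable); a positive offset skips that many leading items.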
+
+ if longest:
+ return zip_longest(*staggered, fillvalue=fillvalue)
+
+ return zip(*staggered)
+
+
+def sort_together(iterables, key_list=(0,), key=None, reverse=False):
+ """Return the input iterables sorted together, with *key_list* as the
+ priority for sorting. All iterables are trimmed to the length of the
+ shortest one.
+
+ This can be used like the sorting function in a spreadsheet. If each
+ iterable represents a column of data, the key list determines which
+ columns are used for sorting.
+
+ By default, all iterables are sorted using the ``0``-th iterable::
+
+ >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
+ >>> sort_together(iterables)
+ [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
+
+ Set a different key list to sort according to another iterable.
+ Specifying multiple keys dictates how ties are broken::
+
+ >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
+ >>> sort_together(iterables, key_list=(1, 2))
+ [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
+
+ To sort by a function of the elements of the iterable, pass a *key*
+ function. Its arguments are the elements of the iterables corresponding to
+ the key list::
+
+ >>> names = ('a', 'b', 'c')
+ >>> lengths = (1, 2, 3)
+ >>> widths = (5, 2, 1)
+ >>> def area(length, width):
+ ... return length * width
+ >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
+ [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
+
+ Set *reverse* to ``True`` to sort in descending order.
+
+ >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
+ [(3, 2, 1), ('a', 'b', 'c')]
+
+ """
+ if key is None:
+ # if there is no key function, the key argument to sorted is an
+ # itemgetter
+ key_argument = itemgetter(*key_list)
+ else:
+ # if there is a key function, call it with the items at the offsets
+ # specified by the key function as arguments
+ key_list = list(key_list)
+ if len(key_list) == 1:
+ # if key_list contains a single item, pass the item at that offset
+ # as the only argument to the key function
+ key_offset = key_list[0]
+ key_argument = lambda zipped_items: key(zipped_items[key_offset])
+ else:
+ # if key_list contains multiple items, use itemgetter to return a
+ # tuple of items, which we pass as *args to the key function
+ get_key_items = itemgetter(*key_list)
+ key_argument = lambda zipped_items: key(
+ *get_key_items(zipped_items)
+ )
+
+ return list(
+ zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
+ )
+
+
+def unzip(iterable):
+ """The inverse of :func:`zip`, this function disaggregates the elements
+ of the zipped *iterable*.
+
+ The ``i``-th iterable contains the ``i``-th element from each element
+ of the zipped iterable. The first element is used to determine the
+ length of the remaining elements.
+
+ >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+ >>> letters, numbers = unzip(iterable)
+ >>> list(letters)
+ ['a', 'b', 'c', 'd']
+ >>> list(numbers)
+ [1, 2, 3, 4]
+
+ This is similar to using ``zip(*iterable)``, but it avoids reading
+ *iterable* into memory. Note, however, that this function uses
+ :func:`itertools.tee` and thus may require significant storage.
+
+ """
+ head, iterable = spy(iter(iterable))
+ if not head:
+ # empty iterable, e.g. zip([], [], [])
+ return ()
+ # spy returns a one-length iterable as head
+ head = head[0]
+ iterables = tee(iterable, len(head))
+
+ def itemgetter(i):
+ def getter(obj):
+ try:
+ return obj[i]
+ except IndexError:
+ # basically if we have an iterable like
+ # iter([(1, 2, 3), (4, 5), (6,)])
+ # the second unzipped iterable would fail at the third tuple
+ # since it would try to access tup[1]
+ # same with the third unzipped iterable and the second tuple
+ # to support these "improperly zipped" iterables,
+ # we create a custom itemgetter
+ # which just stops the unzipped iterables
+ # at first length mismatch
+ raise StopIteration
+
+ return getter
+
+ return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
+
+
+def divide(n, iterable):
+ """Divide the elements from *iterable* into *n* parts, maintaining
+ order.
+
+ >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
+ >>> list(group_1)
+ [1, 2, 3]
+ >>> list(group_2)
+ [4, 5, 6]
+
+ If the length of *iterable* is not evenly divisible by *n*, then the
+ length of the returned iterables will not be identical:
+
+ >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
+ >>> [list(c) for c in children]
+ [[1, 2, 3], [4, 5], [6, 7]]
+
+ If the length of *iterable* is smaller than *n*, then the last returned
+ iterables will be empty:
+
+ >>> children = divide(5, [1, 2, 3])
+ >>> [list(c) for c in children]
+ [[1], [2], [3], [], []]
+
+ This function will exhaust the iterable before returning and may require
+ significant storage. If order is not important, see :func:`distribute`,
+ which does not first pull the iterable into memory.
+
+ """
+ if n < 1:
+ raise ValueError('n must be at least 1')
+
+ try:
+ iterable[:0]
+ except TypeError:
+ seq = tuple(iterable)
+ else:
+ seq = iterable
+
+ q, r = divmod(len(seq), n)
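+ # The first r parts get q + 1 items and the rest get q, e.g. 7 items
+ # split 3 ways: divmod(7, 3) == (2, 1) -> sizes 3, 2, 2.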
+
+ ret = []
+ stop = 0
+ for i in range(1, n + 1):
+ start = stop
+ stop += q + 1 if i <= r else q
+ ret.append(iter(seq[start:stop]))
+
+ return ret
+
+
+def always_iterable(obj, base_type=(str, bytes)):
+ """If *obj* is iterable, return an iterator over its items::
+
+ >>> obj = (1, 2, 3)
+ >>> list(always_iterable(obj))
+ [1, 2, 3]
+
+ If *obj* is not iterable, return a one-item iterable containing *obj*::
+
+ >>> obj = 1
+ >>> list(always_iterable(obj))
+ [1]
+
+ If *obj* is ``None``, return an empty iterable:
+
+ >>> obj = None
+ >>> list(always_iterable(None))
+ []
+
+ By default, binary and text strings are not considered iterable::
+
+ >>> obj = 'foo'
+ >>> list(always_iterable(obj))
+ ['foo']
+
+ If *base_type* is set, objects for which ``isinstance(obj, base_type)``
+ returns ``True`` won't be considered iterable.
+
+ >>> obj = {'a': 1}
+ >>> list(always_iterable(obj)) # Iterate over the dict's keys
+ ['a']
+ >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
+ [{'a': 1}]
+
+ Set *base_type* to ``None`` to avoid any special handling and treat objects
+ Python considers iterable as iterable:
+
+ >>> obj = 'foo'
+ >>> list(always_iterable(obj, base_type=None))
+ ['f', 'o', 'o']
+ """
+ if obj is None:
+ return iter(())
+
+ if (base_type is not None) and isinstance(obj, base_type):
+ return iter((obj,))
+
+ try:
+ return iter(obj)
+ except TypeError:
+ return iter((obj,))
+
+
+def adjacent(predicate, iterable, distance=1):
+ """Return an iterable over `(bool, item)` tuples where the `item` is
+ drawn from *iterable* and the `bool` indicates whether
+ that item satisfies the *predicate* or is adjacent to an item that does.
+
+ For example, to find whether items are adjacent to a ``3``::
+
+ >>> list(adjacent(lambda x: x == 3, range(6)))
+ [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
+
+ Set *distance* to change what counts as adjacent. For example, to find
+ whether items are two places away from a ``3``:
+
+ >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
+ [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
+
+ This is useful for contextualizing the results of a search function.
+ For example, a code comparison tool might want to identify lines that
+ have changed, but also surrounding lines to give the viewer of the diff
+ context.
+
+ The predicate function will only be called once for each item in the
+ iterable.
+
+ See also :func:`groupby_transform`, which can be used with this function
+ to group ranges of items with the same `bool` value.
+
+ """
+ # Allow distance=0 mainly for testing that it reproduces results with map()
+ if distance < 0:
+ raise ValueError('distance must be at least 0')
+
+ i1, i2 = tee(iterable)
+ padding = [False] * distance
+ selected = chain(padding, map(predicate, i1), padding)
+ adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
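+ # Each output flag is any() over a window of 2 * distance + 1 predicate
+ # results centered on the item; the False padding keeps the first and
+ # last items' windows aligned.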
+ return zip(adjacent_to_selected, i2)
+
+
+def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
+ """An extension of :func:`itertools.groupby` that can apply transformations
+ to the grouped data.
+
+ * *keyfunc* is a function computing a key value for each item in *iterable*
+ * *valuefunc* is a function that transforms the individual items from
+ *iterable* after grouping
+ * *reducefunc* is a function that transforms each group of items
+
+ >>> iterable = 'aAAbBBcCC'
+ >>> keyfunc = lambda k: k.upper()
+ >>> valuefunc = lambda v: v.lower()
+ >>> reducefunc = lambda g: ''.join(g)
+ >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
+ [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
+
+ Each optional argument defaults to an identity function if not specified.
+
+ :func:`groupby_transform` is useful when grouping elements of an iterable
+ using a separate iterable as the key. To do this, :func:`zip` the iterables
+ and pass a *keyfunc* that extracts the first element and a *valuefunc*
+ that extracts the second element::
+
+ >>> from operator import itemgetter
+ >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
+ >>> values = 'abcdefghi'
+ >>> iterable = zip(keys, values)
+ >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
+ >>> [(k, ''.join(g)) for k, g in grouper]
+ [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
+
+ Note that the order of items in the iterable is significant.
+ Only adjacent items are grouped together, so if you don't want any
+ duplicate groups, you should sort the iterable by the key function.
+
+ """
+ ret = groupby(iterable, keyfunc)
+ if valuefunc:
+ ret = ((k, map(valuefunc, g)) for k, g in ret)
+ if reducefunc:
+ ret = ((k, reducefunc(g)) for k, g in ret)
+
+ return ret
+
+
+class numeric_range(abc.Sequence, abc.Hashable):
+ """An extension of the built-in ``range()`` function whose arguments can
+ be any orderable numeric type.
+
+ With only *stop* specified, *start* defaults to ``0`` and *step*
+ defaults to ``1``. The output items will match the type of *stop*:
+
+ >>> list(numeric_range(3.5))
+ [0.0, 1.0, 2.0, 3.0]
+
+ With only *start* and *stop* specified, *step* defaults to ``1``. The
+ output items will match the type of *start*:
+
+ >>> from decimal import Decimal
+ >>> start = Decimal('2.1')
+ >>> stop = Decimal('5.1')
+ >>> list(numeric_range(start, stop))
+ [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
+
+ With *start*, *stop*, and *step* specified the output items will match
+ the type of ``start + step``:
+
+ >>> from fractions import Fraction
+ >>> start = Fraction(1, 2) # Start at 1/2
+ >>> stop = Fraction(5, 2) # End at 5/2
+ >>> step = Fraction(1, 2) # Count by 1/2
+ >>> list(numeric_range(start, stop, step))
+ [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
+
+ If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
+
+ >>> list(numeric_range(3, -1, -1.0))
+ [3.0, 2.0, 1.0, 0.0]
+
+ Be aware of the limitations of floating point numbers; the representation
+ of the yielded numbers may be surprising.
+
+ ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
+ is a ``datetime.timedelta`` object:
+
+ >>> import datetime
+ >>> start = datetime.datetime(2019, 1, 1)
+ >>> stop = datetime.datetime(2019, 1, 3)
+ >>> step = datetime.timedelta(days=1)
+ >>> items = iter(numeric_range(start, stop, step))
+ >>> next(items)
+ datetime.datetime(2019, 1, 1, 0, 0)
+ >>> next(items)
+ datetime.datetime(2019, 1, 2, 0, 0)
+
+ """
+
+ _EMPTY_HASH = hash(range(0, 0))
+
+ def __init__(self, *args):
+ argc = len(args)
+ if argc == 1:
+ (self._stop,) = args
+ self._start = type(self._stop)(0)
+ self._step = type(self._stop - self._start)(1)
+ elif argc == 2:
+ self._start, self._stop = args
+ self._step = type(self._stop - self._start)(1)
+ elif argc == 3:
+ self._start, self._stop, self._step = args
+ elif argc == 0:
+ raise TypeError(
+ 'numeric_range expected at least '
+ '1 argument, got {}'.format(argc)
+ )
+ else:
+ raise TypeError(
+ 'numeric_range expected at most '
+ '3 arguments, got {}'.format(argc)
+ )
+
+ self._zero = type(self._step)(0)
+ if self._step == self._zero:
+ raise ValueError('numeric_range() arg 3 must not be zero')
+ self._growing = self._step > self._zero
+
+ def __bool__(self):
+ if self._growing:
+ return self._start < self._stop
+ else:
+ return self._start > self._stop
+
+ def __contains__(self, elem):
+ if self._growing:
+ if self._start <= elem < self._stop:
+ return (elem - self._start) % self._step == self._zero
+ else:
+ if self._start >= elem > self._stop:
+ return (self._start - elem) % (-self._step) == self._zero
+
+ return False
+
+ def __eq__(self, other):
+ if isinstance(other, numeric_range):
+ empty_self = not bool(self)
+ empty_other = not bool(other)
+ if empty_self or empty_other:
+ return empty_self and empty_other # True if both empty
+ else:
+ return (
+ self._start == other._start
+ and self._step == other._step
+ and self._get_by_index(-1) == other._get_by_index(-1)
+ )
+ else:
+ return False
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ return self._get_by_index(key)
+ elif isinstance(key, slice):
+ step = self._step if key.step is None else key.step * self._step
+
+ if key.start is None or key.start <= -self._len:
+ start = self._start
+ elif key.start >= self._len:
+ start = self._stop
+ else: # -self._len < key.start < self._len
+ start = self._get_by_index(key.start)
+
+ if key.stop is None or key.stop >= self._len:
+ stop = self._stop
+ elif key.stop <= -self._len:
+ stop = self._start
+ else: # -self._len < key.stop < self._len
+ stop = self._get_by_index(key.stop)
+
+ return numeric_range(start, stop, step)
+ else:
+ raise TypeError(
+ 'numeric range indices must be '
+ 'integers or slices, not {}'.format(type(key).__name__)
+ )
+
+ def __hash__(self):
+ if self:
+ return hash((self._start, self._get_by_index(-1), self._step))
+ else:
+ return self._EMPTY_HASH
+
+ def __iter__(self):
+ values = (self._start + (n * self._step) for n in count())
+ if self._growing:
+ return takewhile(partial(gt, self._stop), values)
+ else:
+ return takewhile(partial(lt, self._stop), values)
+
+ def __len__(self):
+ return self._len
+
+ @cached_property
+ def _len(self):
+ if self._growing:
+ start = self._start
+ stop = self._stop
+ step = self._step
+ else:
+ start = self._stop
+ stop = self._start
+ step = -self._step
+ distance = stop - start
+ if distance <= self._zero:
+ return 0
+ else: # distance > 0 and step > 0: regular euclidean division
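+ # e.g. numeric_range(1, 10, 3): distance 9, divmod(9, 3) == (3, 0),
+ # so the length is 3 (the values 1, 4, and 7).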
+ q, r = divmod(distance, step)
+ return int(q) + int(r != self._zero)
+
+ def __reduce__(self):
+ return numeric_range, (self._start, self._stop, self._step)
+
+ def __repr__(self):
+ if self._step == 1:
+ return "numeric_range({}, {})".format(
+ repr(self._start), repr(self._stop)
+ )
+ else:
+ return "numeric_range({}, {}, {})".format(
+ repr(self._start), repr(self._stop), repr(self._step)
+ )
+
+ def __reversed__(self):
+ return iter(
+ numeric_range(
+ self._get_by_index(-1), self._start - self._step, -self._step
+ )
+ )
+
+ def count(self, value):
+ return int(value in self)
+
+ def index(self, value):
+ if self._growing:
+ if self._start <= value < self._stop:
+ q, r = divmod(value - self._start, self._step)
+ if r == self._zero:
+ return int(q)
+ else:
+ if self._start >= value > self._stop:
+ q, r = divmod(self._start - value, -self._step)
+ if r == self._zero:
+ return int(q)
+
+ raise ValueError("{} is not in numeric range".format(value))
+
+ def _get_by_index(self, i):
+ if i < 0:
+ i += self._len
+ if i < 0 or i >= self._len:
+ raise IndexError("numeric range object index out of range")
+ return self._start + i * self._step
+
+
+def count_cycle(iterable, n=None):
+ """Cycle through the items from *iterable* up to *n* times, yielding
+ the number of completed cycles along with each item. If *n* is omitted the
+ process repeats indefinitely.
+
+ >>> list(count_cycle('AB', 3))
+ [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
+
+ """
+ iterable = tuple(iterable)
+ if not iterable:
+ return iter(())
+ counter = count() if n is None else range(n)
+ return ((i, item) for i in counter for item in iterable)
+
+
+def mark_ends(iterable):
+ """Yield 3-tuples of the form ``(is_first, is_last, item)``.
+
+ >>> list(mark_ends('ABC'))
+ [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
+
+ Use this when looping over an iterable to take special action on its first
+ and/or last items:
+
+ >>> iterable = ['Header', 100, 200, 'Footer']
+ >>> total = 0
+ >>> for is_first, is_last, item in mark_ends(iterable):
+ ... if is_first:
+ ... continue # Skip the header
+ ... if is_last:
+ ... continue # Skip the footer
+ ... total += item
+ >>> print(total)
+ 300
+ """
+ it = iter(iterable)
+
+ try:
+ b = next(it)
+ except StopIteration:
+ return
+
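+ # b always holds the look-ahead item; when the look-ahead fails, the
+ # item currently in a is known to be the last one.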
+ try:
+ for i in count():
+ a = b
+ b = next(it)
+ yield i == 0, False, a
+
+ except StopIteration:
+ yield i == 0, True, a
+
+
+def locate(iterable, pred=bool, window_size=None):
+ """Yield the index of each item in *iterable* for which *pred* returns
+ ``True``.
+
+ *pred* defaults to :func:`bool`, which will select truthy items:
+
+ >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
+ [1, 2, 4]
+
+ Set *pred* to a custom function to, e.g., find the indexes for a particular
+ item.
+
+ >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
+ [1, 3]
+
+ If *window_size* is given, then the *pred* function will be called with
+ that many items. This enables searching for sub-sequences:
+
+ >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+ >>> pred = lambda *args: args == (1, 2, 3)
+ >>> list(locate(iterable, pred=pred, window_size=3))
+ [1, 5, 9]
+
+ Use with :func:`seekable` to find indexes and then retrieve the associated
+ items:
+
+ >>> from itertools import count
+ >>> from more_itertools import seekable
+ >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
+ >>> it = seekable(source)
+ >>> pred = lambda x: x > 100
+ >>> indexes = locate(it, pred=pred)
+ >>> i = next(indexes)
+ >>> it.seek(i)
+ >>> next(it)
+ 106
+
+ """
+ if window_size is None:
+ return compress(count(), map(pred, iterable))
+
+ if window_size < 1:
+ raise ValueError('window size must be at least 1')
+
+ it = windowed(iterable, window_size, fillvalue=_marker)
+ return compress(count(), starmap(pred, it))
+
+
+def longest_common_prefix(iterables):
+ """Yield elements of the longest common prefix amongst given *iterables*.
+
+ >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
+ 'ab'
+
+ """
+ return (c[0] for c in takewhile(all_equal, zip(*iterables)))
+
+
+def lstrip(iterable, pred):
+ """Yield the items from *iterable*, but strip any from the beginning
+ for which *pred* returns ``True``.
+
+ For example, to remove a set of items from the start of an iterable:
+
+ >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+ >>> pred = lambda x: x in {None, False, ''}
+ >>> list(lstrip(iterable, pred))
+ [1, 2, None, 3, False, None]
+
+ This function is analogous to :func:`str.lstrip`, and is essentially
+ a wrapper for :func:`itertools.dropwhile`.
+
+ """
+ return dropwhile(pred, iterable)
+
+
+def rstrip(iterable, pred):
+ """Yield the items from *iterable*, but strip any from the end
+ for which *pred* returns ``True``.
+
+ For example, to remove a set of items from the end of an iterable:
+
+ >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+ >>> pred = lambda x: x in {None, False, ''}
+ >>> list(rstrip(iterable, pred))
+ [None, False, None, 1, 2, None, 3]
+
+ This function is analogous to :func:`str.rstrip`.
+
+ """
+ cache = []
+ cache_append = cache.append
+ cache_clear = cache.clear
+ for x in iterable:
+ if pred(x):
+ cache_append(x)
+ else:
+ yield from cache
+ cache_clear()
+ yield x
+
+
+def strip(iterable, pred):
+ """Yield the items from *iterable*, but strip any from the
+ beginning and end for which *pred* returns ``True``.
+
+ For example, to remove a set of items from both ends of an iterable:
+
+ >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+ >>> pred = lambda x: x in {None, False, ''}
+ >>> list(strip(iterable, pred))
+ [1, 2, None, 3]
+
+ This function is analogous to :func:`str.strip`.
+
+ """
+ return rstrip(lstrip(iterable, pred), pred)
+
+
+class islice_extended:
+ """An extension of :func:`itertools.islice` that supports negative values
+ for *stop*, *start*, and *step*.
+
+ >>> iterable = iter('abcdefgh')
+ >>> list(islice_extended(iterable, -4, -1))
+ ['e', 'f', 'g']
+
+ Slices with negative values require some caching of *iterable*, but this
+ function takes care to minimize the amount of memory required.
+
+ For example, you can use a negative step with an infinite iterator:
+
+ >>> from itertools import count
+ >>> list(islice_extended(count(), 110, 99, -2))
+ [110, 108, 106, 104, 102, 100]
+
+ You can also use slice notation directly:
+
+ >>> iterable = map(str, count())
+ >>> it = islice_extended(iterable)[10:20:2]
+ >>> list(it)
+ ['10', '12', '14', '16', '18']
+
+ """
+
+ def __init__(self, iterable, *args):
+ it = iter(iterable)
+ if args:
+ self._iterable = _islice_helper(it, slice(*args))
+ else:
+ self._iterable = it
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._iterable)
+
+ def __getitem__(self, key):
+ if isinstance(key, slice):
+ return islice_extended(_islice_helper(self._iterable, key))
+
+ raise TypeError('islice_extended.__getitem__ argument must be a slice')
+
+
+def _islice_helper(it, s):
+ start = s.start
+ stop = s.stop
+ if s.step == 0:
+ raise ValueError('step argument must be a non-zero integer or None.')
+ step = s.step or 1
+
+ if step > 0:
+ start = 0 if (start is None) else start
+
+ if start < 0:
+ # Consume all but the last -start items
+ cache = deque(enumerate(it, 1), maxlen=-start)
+ len_iter = cache[-1][0] if cache else 0
+
+ # Adjust start to be positive
+ i = max(len_iter + start, 0)
+
+ # Adjust stop to be positive
+ if stop is None:
+ j = len_iter
+ elif stop >= 0:
+ j = min(stop, len_iter)
+ else:
+ j = max(len_iter + stop, 0)
+
+ # Slice the cache
+ n = j - i
+ if n <= 0:
+ return
+
+ for index, item in islice(cache, 0, n, step):
+ yield item
+ elif (stop is not None) and (stop < 0):
+ # Advance to the start position
+ next(islice(it, start, start), None)
+
+ # When stop is negative, we have to carry -stop items while
+ # iterating
+ cache = deque(islice(it, -stop), maxlen=-stop)
+
+ for index, item in enumerate(it):
+ cached_item = cache.popleft()
+ if index % step == 0:
+ yield cached_item
+ cache.append(item)
+ else:
+ # When both start and stop are positive we have the normal case
+ yield from islice(it, start, stop, step)
+ else:
+ start = -1 if (start is None) else start
+
+ if (stop is not None) and (stop < 0):
+ # Consume all but the last items
+ n = -stop - 1
+ cache = deque(enumerate(it, 1), maxlen=n)
+ len_iter = cache[-1][0] if cache else 0
+
+ # If start and stop are both negative they are comparable and
+ # we can just slice. Otherwise we can adjust start to be negative
+ # and then slice.
+ if start < 0:
+ i, j = start, stop
+ else:
+ i, j = min(start - len_iter, -1), None
+
+ for index, item in list(cache)[i:j:step]:
+ yield item
+ else:
+ # Advance to the stop position
+ if stop is not None:
+ m = stop + 1
+ next(islice(it, m, m), None)
+
+ # stop is positive, so if start is negative they are not comparable
+ # and we need the rest of the items.
+ if start < 0:
+ i = start
+ n = None
+ # stop is None and start is positive, so we just need items up to
+ # the start index.
+ elif stop is None:
+ i = None
+ n = start + 1
+ # Both stop and start are positive, so they are comparable.
+ else:
+ i = None
+ n = start - stop
+ if n <= 0:
+ return
+
+ cache = list(islice(it, n))
+
+ yield from cache[i::step]
+
+
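+# A minimal illustrative sketch (a hypothetical helper, not part of the
+# public API): islice_extended is intended to agree with ordinary list
+# slicing, including the negative start/stop/step cases handled by
+# _islice_helper above.
+def _demo_islice_extended_matches_list_slicing():
+    data = list(range(10))
+    for sl in (slice(-4, -1), slice(8, 2, -2), slice(None, None, -1)):
+        expected = data[sl]
+        actual = list(islice_extended(iter(data))[sl])
+        assert actual == expected, (sl, actual, expected)
+    return True
+
+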
+def always_reversible(iterable):
+ """An extension of :func:`reversed` that supports all iterables, not
+ just those which implement the ``Reversible`` or ``Sequence`` protocols.
+
+ >>> print(*always_reversible(x for x in range(3)))
+ 2 1 0
+
+ If the iterable is already reversible, this function returns the
+ result of :func:`reversed()`. If the iterable is not reversible,
+ this function will cache the remaining items in the iterable and
+ yield them in reverse order, which may require significant storage.
+ """
+ try:
+ return reversed(iterable)
+ except TypeError:
+ return reversed(list(iterable))
+
+
+def consecutive_groups(iterable, ordering=lambda x: x):
+ """Yield groups of consecutive items using :func:`itertools.groupby`.
+ The *ordering* function determines whether two items are adjacent by
+ returning their position.
+
+ By default, the ordering function is the identity function. This is
+ suitable for finding runs of numbers:
+
+ >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
+ >>> for group in consecutive_groups(iterable):
+ ... print(list(group))
+ [1]
+ [10, 11, 12]
+ [20]
+ [30, 31, 32, 33]
+ [40]
+
+ For finding runs of adjacent letters, try using the :meth:`index` method
+ of a string of letters:
+
+ >>> from string import ascii_lowercase
+ >>> iterable = 'abcdfgilmnop'
+ >>> ordering = ascii_lowercase.index
+ >>> for group in consecutive_groups(iterable, ordering):
+ ... print(list(group))
+ ['a', 'b', 'c', 'd']
+ ['f', 'g']
+ ['i']
+ ['l', 'm', 'n', 'o', 'p']
+
+    Each group of consecutive items is an iterator that shares its source with
+    *iterable*. When an output group is advanced, the previous group is no
+    longer available unless its elements are copied (e.g., into a ``list``).
+
+ >>> iterable = [1, 2, 11, 12, 21, 22]
+ >>> saved_groups = []
+ >>> for group in consecutive_groups(iterable):
+ ... saved_groups.append(list(group)) # Copy group elements
+ >>> saved_groups
+ [[1, 2], [11, 12], [21, 22]]
+
+ """
+ for k, g in groupby(
+ enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
+ ):
+ yield map(itemgetter(1), g)
+
+
+def difference(iterable, func=sub, *, initial=None):
+ """This function is the inverse of :func:`itertools.accumulate`. By default
+ it will compute the first difference of *iterable* using
+ :func:`operator.sub`:
+
+ >>> from itertools import accumulate
+ >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
+ >>> list(difference(iterable))
+ [0, 1, 2, 3, 4]
+
+ *func* defaults to :func:`operator.sub`, but other functions can be
+ specified. They will be applied as follows::
+
+ A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
+
+ For example, to do progressive division:
+
+ >>> iterable = [1, 2, 6, 24, 120]
+ >>> func = lambda x, y: x // y
+ >>> list(difference(iterable, func))
+ [1, 2, 3, 4, 5]
+
+ If the *initial* keyword is set, the first element will be skipped when
+ computing successive differences.
+
+ >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
+ >>> list(difference(it, initial=10))
+ [1, 2, 3]
+
+ """
+ a, b = tee(iterable)
+ try:
+ first = [next(b)]
+ except StopIteration:
+ return iter([])
+
+ if initial is not None:
+ first = []
+
+ return chain(first, map(func, b, a))
+
+
+class SequenceView(Sequence):
+ """Return a read-only view of the sequence object *target*.
+
+ :class:`SequenceView` objects are analogous to Python's built-in
+ "dictionary view" types. They provide a dynamic view of a sequence's items,
+ meaning that when the sequence updates, so does the view.
+
+ >>> seq = ['0', '1', '2']
+ >>> view = SequenceView(seq)
+ >>> view
+ SequenceView(['0', '1', '2'])
+ >>> seq.append('3')
+ >>> view
+ SequenceView(['0', '1', '2', '3'])
+
+ Sequence views support indexing, slicing, and length queries. They act
+ like the underlying sequence, except they don't allow assignment:
+
+ >>> view[1]
+ '1'
+ >>> view[1:-1]
+ ['1', '2']
+ >>> len(view)
+ 4
+
+ Sequence views are useful as an alternative to copying, as they don't
+ require (much) extra storage.
+
+ """
+
+ def __init__(self, target):
+ if not isinstance(target, Sequence):
+ raise TypeError
+ self._target = target
+
+ def __getitem__(self, index):
+ return self._target[index]
+
+ def __len__(self):
+ return len(self._target)
+
+ def __repr__(self):
+ return '{}({})'.format(self.__class__.__name__, repr(self._target))
+
+
+class seekable:
+ """Wrap an iterator to allow for seeking backward and forward. This
+ progressively caches the items in the source iterable so they can be
+ re-visited.
+
+ Call :meth:`seek` with an index to seek to that position in the source
+ iterable.
+
+ To "reset" an iterator, seek to ``0``:
+
+ >>> from itertools import count
+ >>> it = seekable((str(n) for n in count()))
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+ >>> it.seek(0)
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+ >>> next(it)
+ '3'
+
+ You can also seek forward:
+
+ >>> it = seekable((str(n) for n in range(20)))
+ >>> it.seek(10)
+ >>> next(it)
+ '10'
+ >>> it.relative_seek(-2) # Seeking relative to the current position
+ >>> next(it)
+ '9'
+ >>> it.seek(20) # Seeking past the end of the source isn't a problem
+ >>> list(it)
+ []
+ >>> it.seek(0) # Resetting works even after hitting the end
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+
+ Call :meth:`peek` to look ahead one item without advancing the iterator:
+
+ >>> it = seekable('1234')
+ >>> it.peek()
+ '1'
+ >>> list(it)
+ ['1', '2', '3', '4']
+ >>> it.peek(default='empty')
+ 'empty'
+
+ Before the iterator is at its end, calling :func:`bool` on it will return
+ ``True``. After it will return ``False``:
+
+ >>> it = seekable('5678')
+ >>> bool(it)
+ True
+ >>> list(it)
+ ['5', '6', '7', '8']
+ >>> bool(it)
+ False
+
+ You may view the contents of the cache with the :meth:`elements` method.
+ That returns a :class:`SequenceView`, a view that updates automatically:
+
+ >>> it = seekable((str(n) for n in range(10)))
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+ >>> elements = it.elements()
+ >>> elements
+ SequenceView(['0', '1', '2'])
+ >>> next(it)
+ '3'
+ >>> elements
+ SequenceView(['0', '1', '2', '3'])
+
+ By default, the cache grows as the source iterable progresses, so beware of
+ wrapping very large or infinite iterables. Supply *maxlen* to limit the
+ size of the cache (this of course limits how far back you can seek).
+
+ >>> from itertools import count
+ >>> it = seekable((str(n) for n in count()), maxlen=2)
+ >>> next(it), next(it), next(it), next(it)
+ ('0', '1', '2', '3')
+ >>> list(it.elements())
+ ['2', '3']
+ >>> it.seek(0)
+ >>> next(it), next(it), next(it), next(it)
+ ('2', '3', '4', '5')
+ >>> next(it)
+ '6'
+
+ """
+
+ def __init__(self, iterable, maxlen=None):
+ self._source = iter(iterable)
+ if maxlen is None:
+ self._cache = []
+ else:
+ self._cache = deque([], maxlen)
+ self._index = None
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._index is not None:
+ try:
+ item = self._cache[self._index]
+ except IndexError:
+ self._index = None
+ else:
+ self._index += 1
+ return item
+
+ item = next(self._source)
+ self._cache.append(item)
+ return item
+
+ def __bool__(self):
+ try:
+ self.peek()
+ except StopIteration:
+ return False
+ return True
+
+ def peek(self, default=_marker):
+ try:
+ peeked = next(self)
+ except StopIteration:
+ if default is _marker:
+ raise
+ return default
+ if self._index is None:
+ self._index = len(self._cache)
+ self._index -= 1
+ return peeked
+
+ def elements(self):
+ return SequenceView(self._cache)
+
+ def seek(self, index):
+ self._index = index
+ remainder = index - len(self._cache)
+ if remainder > 0:
+ consume(self, remainder)
+
+ def relative_seek(self, count):
+ index = len(self._cache)
+ self.seek(max(index + count, 0))
+
+
+class run_length:
+ """
+ :func:`run_length.encode` compresses an iterable with run-length encoding.
+ It yields groups of repeated items with the count of how many times they
+ were repeated:
+
+ >>> uncompressed = 'abbcccdddd'
+ >>> list(run_length.encode(uncompressed))
+ [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+
+ :func:`run_length.decode` decompresses an iterable that was previously
+ compressed with run-length encoding. It yields the items of the
+ decompressed iterable:
+
+ >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+ >>> list(run_length.decode(compressed))
+ ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
+
+ """
+
+ @staticmethod
+ def encode(iterable):
+ return ((k, ilen(g)) for k, g in groupby(iterable))
+
+ @staticmethod
+ def decode(iterable):
+ return chain.from_iterable(repeat(k, n) for k, n in iterable)
+
+
+def exactly_n(iterable, n, predicate=bool):
+ """Return ``True`` if exactly ``n`` items in the iterable are ``True``
+ according to the *predicate* function.
+
+ >>> exactly_n([True, True, False], 2)
+ True
+ >>> exactly_n([True, True, False], 1)
+ False
+ >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
+ True
+
+ The iterable will be advanced until ``n + 1`` truthy items are encountered,
+ so avoid calling it on infinite iterables.
+
+ """
+ return len(take(n + 1, filter(predicate, iterable))) == n
+
+
+def circular_shifts(iterable):
+ """Return a list of circular shifts of *iterable*.
+
+ >>> circular_shifts(range(4))
+ [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
+ """
+ lst = list(iterable)
+ return take(len(lst), windowed(cycle(lst), len(lst)))
+
+
+def make_decorator(wrapping_func, result_index=0):
+ """Return a decorator version of *wrapping_func*, which is a function that
+ modifies an iterable. *result_index* is the position in that function's
+ signature where the iterable goes.
+
+ This lets you use itertools on the "production end," i.e. at function
+ definition. This can augment what the function returns without changing the
+ function's code.
+
+ For example, to produce a decorator version of :func:`chunked`:
+
+ >>> from more_itertools import chunked
+ >>> chunker = make_decorator(chunked, result_index=0)
+ >>> @chunker(3)
+ ... def iter_range(n):
+ ... return iter(range(n))
+ ...
+ >>> list(iter_range(9))
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+
+ To only allow truthy items to be returned:
+
+ >>> truth_serum = make_decorator(filter, result_index=1)
+ >>> @truth_serum(bool)
+ ... def boolean_test():
+ ... return [0, 1, '', ' ', False, True]
+ ...
+ >>> list(boolean_test())
+ [1, ' ', True]
+
+ The :func:`peekable` and :func:`seekable` wrappers make for practical
+ decorators:
+
+ >>> from more_itertools import peekable
+ >>> peekable_function = make_decorator(peekable)
+ >>> @peekable_function()
+ ... def str_range(*args):
+ ... return (str(x) for x in range(*args))
+ ...
+ >>> it = str_range(1, 20, 2)
+ >>> next(it), next(it), next(it)
+ ('1', '3', '5')
+ >>> it.peek()
+ '7'
+ >>> next(it)
+ '7'
+
+ """
+
+ # See https://sites.google.com/site/bbayles/index/decorator_factory for
+ # notes on how this works.
+ def decorator(*wrapping_args, **wrapping_kwargs):
+ def outer_wrapper(f):
+ def inner_wrapper(*args, **kwargs):
+ result = f(*args, **kwargs)
+ wrapping_args_ = list(wrapping_args)
+ wrapping_args_.insert(result_index, result)
+ return wrapping_func(*wrapping_args_, **wrapping_kwargs)
+
+ return inner_wrapper
+
+ return outer_wrapper
+
+ return decorator
+
+
+def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
+ """Return a dictionary that maps the items in *iterable* to categories
+ defined by *keyfunc*, transforms them with *valuefunc*, and
+ then summarizes them by category with *reducefunc*.
+
+ *valuefunc* defaults to the identity function if it is unspecified.
+ If *reducefunc* is unspecified, no summarization takes place:
+
+ >>> keyfunc = lambda x: x.upper()
+ >>> result = map_reduce('abbccc', keyfunc)
+ >>> sorted(result.items())
+ [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
+
+ Specifying *valuefunc* transforms the categorized items:
+
+ >>> keyfunc = lambda x: x.upper()
+ >>> valuefunc = lambda x: 1
+ >>> result = map_reduce('abbccc', keyfunc, valuefunc)
+ >>> sorted(result.items())
+ [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
+
+ Specifying *reducefunc* summarizes the categorized items:
+
+ >>> keyfunc = lambda x: x.upper()
+ >>> valuefunc = lambda x: 1
+ >>> reducefunc = sum
+ >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
+ >>> sorted(result.items())
+ [('A', 1), ('B', 2), ('C', 3)]
+
+ You may want to filter the input iterable before applying the map/reduce
+ procedure:
+
+ >>> all_items = range(30)
+ >>> items = [x for x in all_items if 10 <= x <= 20] # Filter
+ >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
+ >>> categories = map_reduce(items, keyfunc=keyfunc)
+ >>> sorted(categories.items())
+ [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
+ >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
+ >>> sorted(summaries.items())
+ [(0, 90), (1, 75)]
+
+ Note that all items in the iterable are gathered into a list before the
+ summarization step, which may require significant storage.
+
+ The returned object is a :obj:`collections.defaultdict` with the
+ ``default_factory`` set to ``None``, such that it behaves like a normal
+ dictionary.
+
+ """
+ valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
+
+ ret = defaultdict(list)
+ for item in iterable:
+ key = keyfunc(item)
+ value = valuefunc(item)
+ ret[key].append(value)
+
+ if reducefunc is not None:
+ for key, value_list in ret.items():
+ ret[key] = reducefunc(value_list)
+
+ ret.default_factory = None
+ return ret
+
+
+def rlocate(iterable, pred=bool, window_size=None):
+ """Yield the index of each item in *iterable* for which *pred* returns
+ ``True``, starting from the right and moving left.
+
+ *pred* defaults to :func:`bool`, which will select truthy items:
+
+ >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
+ [4, 2, 1]
+
+ Set *pred* to a custom function to, e.g., find the indexes for a particular
+ item:
+
+ >>> iterable = iter('abcb')
+ >>> pred = lambda x: x == 'b'
+ >>> list(rlocate(iterable, pred))
+ [3, 1]
+
+ If *window_size* is given, then the *pred* function will be called with
+ that many items. This enables searching for sub-sequences:
+
+ >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+ >>> pred = lambda *args: args == (1, 2, 3)
+ >>> list(rlocate(iterable, pred=pred, window_size=3))
+ [9, 5, 1]
+
+ Beware, this function won't return anything for infinite iterables.
+ If *iterable* is reversible, ``rlocate`` will reverse it and search from
+ the right. Otherwise, it will search from the left and return the results
+ in reverse order.
+
+    See :func:`locate` for other example applications.
+
+ """
+ if window_size is None:
+ try:
+ len_iter = len(iterable)
+ return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
+ except TypeError:
+ pass
+
+ return reversed(list(locate(iterable, pred, window_size)))
+
+
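+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# rlocate takes the reversed-sequence shortcut when len() is available and
+# falls back to a full left-to-right scan otherwise; both paths agree.
+def _demo_rlocate_paths_agree():
+    data = [0, 1, 1, 0, 1, 0, 0]
+    from_sequence = list(rlocate(data))        # uses len() and reversed()
+    from_iterator = list(rlocate(iter(data)))  # falls back to a full scan
+    assert from_sequence == from_iterator == [4, 2, 1]
+    return True
+
+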
+def replace(iterable, pred, substitutes, count=None, window_size=1):
+ """Yield the items from *iterable*, replacing the items for which *pred*
+ returns ``True`` with the items from the iterable *substitutes*.
+
+ >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
+ >>> pred = lambda x: x == 0
+ >>> substitutes = (2, 3)
+ >>> list(replace(iterable, pred, substitutes))
+ [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
+
+ If *count* is given, the number of replacements will be limited:
+
+ >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
+ >>> pred = lambda x: x == 0
+ >>> substitutes = [None]
+ >>> list(replace(iterable, pred, substitutes, count=2))
+ [1, 1, None, 1, 1, None, 1, 1, 0]
+
+ Use *window_size* to control the number of items passed as arguments to
+ *pred*. This allows for locating and replacing subsequences.
+
+ >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
+ >>> window_size = 3
+ >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
+ >>> substitutes = [3, 4] # Splice in these items
+ >>> list(replace(iterable, pred, substitutes, window_size=window_size))
+ [3, 4, 5, 3, 4, 5]
+
+ """
+ if window_size < 1:
+ raise ValueError('window_size must be at least 1')
+
+ # Save the substitutes iterable, since it's used more than once
+ substitutes = tuple(substitutes)
+
+ # Add padding such that the number of windows matches the length of the
+ # iterable
+ it = chain(iterable, [_marker] * (window_size - 1))
+ windows = windowed(it, window_size)
+
+ n = 0
+ for w in windows:
+ # If the current window matches our predicate (and we haven't hit
+ # our maximum number of replacements), splice in the substitutes
+ # and then consume the following windows that overlap with this one.
+ # For example, if the iterable is (0, 1, 2, 3, 4...)
+ # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
+ # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
+ if pred(*w):
+ if (count is None) or (n < count):
+ n += 1
+ yield from substitutes
+ consume(windows, window_size - 1)
+ continue
+
+ # If there was no match (or we've reached the replacement limit),
+ # yield the first item from the window.
+ if w and (w[0] is not _marker):
+ yield w[0]
+
+
+def partitions(iterable):
+ """Yield all possible order-preserving partitions of *iterable*.
+
+ >>> iterable = 'abc'
+ >>> for part in partitions(iterable):
+ ... print([''.join(p) for p in part])
+ ['abc']
+ ['a', 'bc']
+ ['ab', 'c']
+ ['a', 'b', 'c']
+
+ This is unrelated to :func:`partition`.
+
+ """
+ sequence = list(iterable)
+ n = len(sequence)
+ for i in powerset(range(1, n)):
+ yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
+
+
+def set_partitions(iterable, k=None):
+ """
+ Yield the set partitions of *iterable* into *k* parts. Set partitions are
+ not order-preserving.
+
+ >>> iterable = 'abc'
+ >>> for part in set_partitions(iterable, 2):
+ ... print([''.join(p) for p in part])
+ ['a', 'bc']
+ ['ab', 'c']
+ ['b', 'ac']
+
+
+ If *k* is not given, every set partition is generated.
+
+ >>> iterable = 'abc'
+ >>> for part in set_partitions(iterable):
+ ... print([''.join(p) for p in part])
+ ['abc']
+ ['a', 'bc']
+ ['ab', 'c']
+ ['b', 'ac']
+ ['a', 'b', 'c']
+
+ """
+ L = list(iterable)
+ n = len(L)
+ if k is not None:
+ if k < 1:
+ raise ValueError(
+ "Can't partition in a negative or zero number of groups"
+ )
+ elif k > n:
+ return
+
+ def set_partitions_helper(L, k):
+ n = len(L)
+ if k == 1:
+ yield [L]
+ elif n == k:
+ yield [[s] for s in L]
+ else:
+ e, *M = L
+ for p in set_partitions_helper(M, k - 1):
+ yield [[e], *p]
+ for p in set_partitions_helper(M, k):
+ for i in range(len(p)):
+ yield p[:i] + [[e] + p[i]] + p[i + 1 :]
+
+ if k is None:
+ for k in range(1, n + 1):
+ yield from set_partitions_helper(L, k)
+ else:
+ yield from set_partitions_helper(L, k)
+
+
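+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# a length-n sequence has 2 ** (n - 1) order-preserving partitions (one per
+# subset of the n - 1 cut points), while set_partitions(iterable, k) yields
+# S(n, k) partitions, the Stirling number of the second kind; S(4, 2) == 7.
+def _demo_partition_counts():
+    assert len(list(partitions('abcd'))) == 2 ** (4 - 1)
+    assert len(list(set_partitions('abcd', 2))) == 7
+    return True
+
+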
+class time_limited:
+ """
+ Yield items from *iterable* until *limit_seconds* have passed.
+ If the time limit expires before all items have been yielded, the
+    ``timed_out`` attribute will be set to ``True``.
+
+ >>> from time import sleep
+ >>> def generator():
+ ... yield 1
+ ... yield 2
+ ... sleep(0.2)
+ ... yield 3
+ >>> iterable = time_limited(0.1, generator())
+ >>> list(iterable)
+ [1, 2]
+ >>> iterable.timed_out
+ True
+
+ Note that the time is checked before each item is yielded, and iteration
+ stops if the time elapsed is greater than *limit_seconds*. If your time
+ limit is 1 second, but it takes 2 seconds to generate the first item from
+ the iterable, the function will run for 2 seconds and not yield anything.
+    As a special case, when *limit_seconds* is zero, the iterator yields
+    nothing.
+
+ """
+
+ def __init__(self, limit_seconds, iterable):
+ if limit_seconds < 0:
+ raise ValueError('limit_seconds must be positive')
+ self.limit_seconds = limit_seconds
+ self._iterable = iter(iterable)
+ self._start_time = monotonic()
+ self.timed_out = False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.limit_seconds == 0:
+ self.timed_out = True
+ raise StopIteration
+ item = next(self._iterable)
+ if monotonic() - self._start_time > self.limit_seconds:
+ self.timed_out = True
+ raise StopIteration
+
+ return item
+
+
+def only(iterable, default=None, too_long=None):
+ """If *iterable* has only one item, return it.
+ If it has zero items, return *default*.
+ If it has more than one item, raise the exception given by *too_long*,
+ which is ``ValueError`` by default.
+
+ >>> only([], default='missing')
+ 'missing'
+ >>> only([1])
+ 1
+ >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: Expected exactly one item in iterable, but got 1, 2,
+    and perhaps more.
+ >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ TypeError
+
+ Note that :func:`only` attempts to advance *iterable* twice to ensure there
+ is only one item. See :func:`spy` or :func:`peekable` to check
+ iterable contents less destructively.
+ """
+ it = iter(iterable)
+ first_value = next(it, default)
+
+ try:
+ second_value = next(it)
+ except StopIteration:
+ pass
+ else:
+ msg = (
+ 'Expected exactly one item in iterable, but got {!r}, {!r}, '
+ 'and perhaps more.'.format(first_value, second_value)
+ )
+ raise too_long or ValueError(msg)
+
+ return first_value
+
+
+class _IChunk:
+ def __init__(self, iterable, n):
+ self._it = islice(iterable, n)
+ self._cache = deque()
+
+ def fill_cache(self):
+ self._cache.extend(self._it)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return next(self._it)
+ except StopIteration:
+ if self._cache:
+ return self._cache.popleft()
+ else:
+ raise
+
+
+def ichunked(iterable, n):
+ """Break *iterable* into sub-iterables with *n* elements each.
+ :func:`ichunked` is like :func:`chunked`, but it yields iterables
+ instead of lists.
+
+ If the sub-iterables are read in order, the elements of *iterable*
+ won't be stored in memory.
+ If they are read out of order, :func:`itertools.tee` is used to cache
+ elements as necessary.
+
+ >>> from itertools import count
+ >>> all_chunks = ichunked(count(), 4)
+ >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
+ >>> list(c_2) # c_1's elements have been cached; c_3's haven't been
+ [4, 5, 6, 7]
+ >>> list(c_1)
+ [0, 1, 2, 3]
+ >>> list(c_3)
+ [8, 9, 10, 11]
+
+ """
+ source = peekable(iter(iterable))
+ ichunk_marker = object()
+ while True:
+ # Check to see whether we're at the end of the source iterable
+ item = source.peek(ichunk_marker)
+ if item is ichunk_marker:
+ return
+
+ chunk = _IChunk(source, n)
+ yield chunk
+
+ # Advance the source iterable and fill previous chunk's cache
+ chunk.fill_cache()
+
+
+def iequals(*iterables):
+ """Return ``True`` if all given *iterables* are equal to each other,
+ which means that they contain the same elements in the same order.
+
+ The function is useful for comparing iterables of different data types
+ or iterables that do not support equality checks.
+
+ >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
+ True
+
+ >>> iequals("abc", "acb")
+ False
+
+ Not to be confused with :func:`all_equal`, which checks whether all
+ elements of iterable are equal to each other.
+
+ """
+ return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))
+
+
+def distinct_combinations(iterable, r):
+ """Yield the distinct combinations of *r* items taken from *iterable*.
+
+ >>> list(distinct_combinations([0, 0, 1], 2))
+ [(0, 0), (0, 1)]
+
+    Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
+ generated and thrown away. For larger input sequences this is much more
+ efficient.
+
+ """
+ if r < 0:
+ raise ValueError('r must be non-negative')
+ elif r == 0:
+ yield ()
+ return
+ pool = tuple(iterable)
+ generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
+ current_combo = [None] * r
+ level = 0
+ while generators:
+ try:
+ cur_idx, p = next(generators[-1])
+ except StopIteration:
+ generators.pop()
+ level -= 1
+ continue
+ current_combo[level] = p
+ if level + 1 == r:
+ yield tuple(current_combo)
+ else:
+ generators.append(
+ unique_everseen(
+ enumerate(pool[cur_idx + 1 :], cur_idx + 1),
+ key=itemgetter(1),
+ )
+ )
+ level += 1
+
+
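+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# distinct_combinations yields the same tuples as deduplicating
+# itertools.combinations, without generating the duplicates at all.
+def _demo_distinct_combinations_equivalence():
+    from itertools import combinations
+
+    pool = [0, 0, 1, 1, 2]
+    assert sorted(distinct_combinations(pool, 3)) == sorted(
+        set(combinations(pool, 3))
+    )
+    return True
+
+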
+def filter_except(validator, iterable, *exceptions):
+ """Yield the items from *iterable* for which the *validator* function does
+ not raise one of the specified *exceptions*.
+
+ *validator* is called for each item in *iterable*.
+ It should be a function that accepts one argument and raises an exception
+ if that item is not valid.
+
+ >>> iterable = ['1', '2', 'three', '4', None]
+ >>> list(filter_except(int, iterable, ValueError, TypeError))
+ ['1', '2', '4']
+
+ If an exception other than one given by *exceptions* is raised by
+ *validator*, it is raised like normal.
+ """
+ for item in iterable:
+ try:
+ validator(item)
+ except exceptions:
+ pass
+ else:
+ yield item
+
+
+def map_except(function, iterable, *exceptions):
+ """Transform each item from *iterable* with *function* and yield the
+ result, unless *function* raises one of the specified *exceptions*.
+
+ *function* is called to transform each item in *iterable*.
+ It should accept one argument.
+
+ >>> iterable = ['1', '2', 'three', '4', None]
+ >>> list(map_except(int, iterable, ValueError, TypeError))
+ [1, 2, 4]
+
+ If an exception other than one given by *exceptions* is raised by
+ *function*, it is raised like normal.
+ """
+ for item in iterable:
+ try:
+ yield function(item)
+ except exceptions:
+ pass
+
+
+def map_if(iterable, pred, func, func_else=lambda x: x):
+ """Evaluate each item from *iterable* using *pred*. If the result is
+ equivalent to ``True``, transform the item with *func* and yield it.
+ Otherwise, transform the item with *func_else* and yield it.
+
+ *pred*, *func*, and *func_else* should each be functions that accept
+ one argument. By default, *func_else* is the identity function.
+
+ >>> from math import sqrt
+ >>> iterable = list(range(-5, 5))
+ >>> iterable
+ [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
+ >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
+ [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
+ >>> list(map_if(iterable, lambda x: x >= 0,
+ ... lambda x: f'{sqrt(x):.2f}', lambda x: None))
+ [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']
+ """
+ for item in iterable:
+ yield func(item) if pred(item) else func_else(item)
+
+
+def _sample_unweighted(iterable, k):
+ # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
+ # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
+
+ # Fill up the reservoir (collection of samples) with the first `k` samples
+ reservoir = take(k, iterable)
+
+ # Generate random number that's the largest in a sample of k U(0,1) numbers
+ # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
+ W = exp(log(random()) / k)
+
+ # The number of elements to skip before changing the reservoir is a random
+ # number with a geometric distribution. Sample it using random() and logs.
+ next_index = k + floor(log(random()) / log(1 - W))
+
+ for index, element in enumerate(iterable, k):
+ if index == next_index:
+ reservoir[randrange(k)] = element
+ # The new W is the largest in a sample of k U(0, `old_W`) numbers
+ W *= exp(log(random()) / k)
+ next_index += floor(log(random()) / log(1 - W)) + 1
+
+ return reservoir
+
+
+def _sample_weighted(iterable, k, weights):
+ # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
+ # "Weighted random sampling with a reservoir".
+
+ # Log-transform for numerical stability for weights that are small/large
+ weight_keys = (log(random()) / weight for weight in weights)
+
+ # Fill up the reservoir (collection of samples) with the first `k`
+ # weight-keys and elements, then heapify the list.
+ reservoir = take(k, zip(weight_keys, iterable))
+ heapify(reservoir)
+
+ # The number of jumps before changing the reservoir is a random variable
+ # with an exponential distribution. Sample it using random() and logs.
+ smallest_weight_key, _ = reservoir[0]
+ weights_to_skip = log(random()) / smallest_weight_key
+
+ for weight, element in zip(weights, iterable):
+ if weight >= weights_to_skip:
+ # The notation here is consistent with the paper, but we store
+ # the weight-keys in log-space for better numerical stability.
+ smallest_weight_key, _ = reservoir[0]
+ t_w = exp(weight * smallest_weight_key)
+ r_2 = uniform(t_w, 1) # generate U(t_w, 1)
+ weight_key = log(r_2) / weight
+ heapreplace(reservoir, (weight_key, element))
+ smallest_weight_key, _ = reservoir[0]
+ weights_to_skip = log(random()) / smallest_weight_key
+ else:
+ weights_to_skip -= weight
+
+ # Equivalent to [element for weight_key, element in sorted(reservoir)]
+ return [heappop(reservoir)[1] for _ in range(k)]
+
+
+def sample(iterable, k, weights=None):
+ """Return a *k*-length list of elements chosen (without replacement)
+ from the *iterable*. Like :func:`random.sample`, but works on iterables
+ of unknown length.
+
+ >>> iterable = range(100)
+ >>> sample(iterable, 5) # doctest: +SKIP
+ [81, 60, 96, 16, 4]
+
+ An iterable with *weights* may also be given:
+
+ >>> iterable = range(100)
+ >>> weights = (i * i + 1 for i in range(100))
+ >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP
+ [79, 67, 74, 66, 78]
+
+ The algorithm can also be used to generate weighted random permutations.
+ The relative weight of each item determines the probability that it
+ appears late in the permutation.
+
+ >>> data = "abcdefgh"
+ >>> weights = range(1, len(data) + 1)
+ >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
+ ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
+ """
+ if k == 0:
+ return []
+
+ iterable = iter(iterable)
+ if weights is None:
+ return _sample_unweighted(iterable, k)
+ else:
+ weights = iter(weights)
+ return _sample_weighted(iterable, k, weights)
+
+
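+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# when k equals the population size, reservoir sampling keeps every element,
+# so the result is a (randomly ordered) permutation of the input.
+def _demo_sample_returns_permutation():
+    population = range(10)
+    unweighted = sample(population, k=10)
+    weighted = sample(population, k=10, weights=range(1, 11))
+    assert sorted(unweighted) == sorted(weighted) == list(range(10))
+    return True
+
+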
+def is_sorted(iterable, key=None, reverse=False, strict=False):
+ """Returns ``True`` if the items of iterable are in sorted order, and
+ ``False`` otherwise. *key* and *reverse* have the same meaning that they do
+ in the built-in :func:`sorted` function.
+
+ >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
+ True
+ >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
+ False
+
+ If *strict*, tests for strict sorting, that is, returns ``False`` if equal
+ elements are found:
+
+ >>> is_sorted([1, 2, 2])
+ True
+ >>> is_sorted([1, 2, 2], strict=True)
+ False
+
+ The function returns ``False`` after encountering the first out-of-order
+ item. If there are no out-of-order items, the iterable is exhausted.
+ """
+
+ compare = (le if reverse else ge) if strict else (lt if reverse else gt)
+ it = iterable if key is None else map(key, iterable)
+ return not any(starmap(compare, pairwise(it)))
+
+
+class AbortThread(BaseException):
+ pass
+
+
+class callback_iter:
+ """Convert a function that uses callbacks to an iterator.
+
+ Let *func* be a function that takes a `callback` keyword argument.
+ For example:
+
+ >>> def func(callback=None):
+ ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
+ ... if callback:
+ ... callback(i, c)
+ ... return 4
+
+
+ Use ``with callback_iter(func)`` to get an iterator over the parameters
+ that are delivered to the callback.
+
+ >>> with callback_iter(func) as it:
+ ... for args, kwargs in it:
+ ... print(args)
+ (1, 'a')
+ (2, 'b')
+ (3, 'c')
+
+ The function will be called in a background thread. The ``done`` property
+ indicates whether it has completed execution.
+
+ >>> it.done
+ True
+
+ If it completes successfully, its return value will be available
+ in the ``result`` property.
+
+ >>> it.result
+ 4
+
+ Notes:
+
+ * If the function uses some keyword argument besides ``callback``, supply
+ *callback_kwd*.
+ * If it finished executing, but raised an exception, accessing the
+ ``result`` property will raise the same exception.
+ * If it hasn't finished executing, accessing the ``result``
+ property from within the ``with`` block will raise ``RuntimeError``.
+ * If it hasn't finished executing, accessing the ``result`` property from
+ outside the ``with`` block will raise a
+ ``more_itertools.AbortThread`` exception.
+    * Provide *wait_seconds* to adjust how frequently the iterator is polled for
+ output.
+
+ """
+
+ def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
+ self._func = func
+ self._callback_kwd = callback_kwd
+ self._aborted = False
+ self._future = None
+ self._wait_seconds = wait_seconds
+        # Lazily import concurrent.futures
+ self._executor = __import__(
+ 'concurrent.futures'
+ ).futures.ThreadPoolExecutor(max_workers=1)
+ self._iterator = self._reader()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._aborted = True
+ self._executor.shutdown()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._iterator)
+
+ @property
+ def done(self):
+ if self._future is None:
+ return False
+ return self._future.done()
+
+ @property
+ def result(self):
+ if not self.done:
+ raise RuntimeError('Function has not yet completed')
+
+ return self._future.result()
+
+ def _reader(self):
+ q = Queue()
+
+ def callback(*args, **kwargs):
+ if self._aborted:
+ raise AbortThread('canceled by user')
+
+ q.put((args, kwargs))
+
+ self._future = self._executor.submit(
+ self._func, **{self._callback_kwd: callback}
+ )
+
+ while True:
+ try:
+ item = q.get(timeout=self._wait_seconds)
+ except Empty:
+ pass
+ else:
+ q.task_done()
+ yield item
+
+ if self._future.done():
+ break
+
+ remaining = []
+ while True:
+ try:
+ item = q.get_nowait()
+ except Empty:
+ break
+ else:
+ q.task_done()
+ remaining.append(item)
+ q.join()
+ yield from remaining
+
+
+def windowed_complete(iterable, n):
+ """
+ Yield ``(beginning, middle, end)`` tuples, where:
+
+ * Each ``middle`` has *n* items from *iterable*
+ * Each ``beginning`` has the items before the ones in ``middle``
+ * Each ``end`` has the items after the ones in ``middle``
+
+ >>> iterable = range(7)
+ >>> n = 3
+ >>> for beginning, middle, end in windowed_complete(iterable, n):
+ ... print(beginning, middle, end)
+ () (0, 1, 2) (3, 4, 5, 6)
+ (0,) (1, 2, 3) (4, 5, 6)
+ (0, 1) (2, 3, 4) (5, 6)
+ (0, 1, 2) (3, 4, 5) (6,)
+ (0, 1, 2, 3) (4, 5, 6) ()
+
+    Note that *n* must be at least 0 and at most equal to the length of
+ *iterable*.
+
+ This function will exhaust the iterable and may require significant
+ storage.
+ """
+ if n < 0:
+ raise ValueError('n must be >= 0')
+
+ seq = tuple(iterable)
+ size = len(seq)
+
+ if n > size:
+ raise ValueError('n must be <= len(seq)')
+
+ for i in range(size - n + 1):
+ beginning = seq[:i]
+ middle = seq[i : i + n]
+ end = seq[i + n :]
+ yield beginning, middle, end
+
+
+def all_unique(iterable, key=None):
+ """
+ Returns ``True`` if all the elements of *iterable* are unique (no two
+ elements are equal).
+
+ >>> all_unique('ABCB')
+ False
+
+ If a *key* function is specified, it will be used to make comparisons.
+
+ >>> all_unique('ABCb')
+ True
+ >>> all_unique('ABCb', str.lower)
+ False
+
+ The function returns as soon as the first non-unique element is
+ encountered. Iterables with a mix of hashable and unhashable items can
+ be used, but the function will be slower for unhashable items.
+ """
+ seenset = set()
+ seenset_add = seenset.add
+ seenlist = []
+ seenlist_add = seenlist.append
+ for element in map(key, iterable) if key else iterable:
+ try:
+ if element in seenset:
+ return False
+ seenset_add(element)
+ except TypeError:
+ if element in seenlist:
+ return False
+ seenlist_add(element)
+ return True
+
+
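+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# hashable items are tracked in a set and unhashable items fall back to a
+# slower list, so inputs that mix both kinds still work.
+def _demo_all_unique_mixed_hashability():
+    assert all_unique([1, 2, [1], [2]]) is True   # lists are unhashable
+    assert all_unique([[1], 2, [1]]) is False     # repeated unhashable item
+    return True
+
+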
+def nth_product(index, *args):
+ """Equivalent to ``list(product(*args))[index]``.
+
+ The products of *args* can be ordered lexicographically.
+ :func:`nth_product` computes the product at sort position *index* without
+ computing the previous products.
+
+ >>> nth_product(8, range(2), range(2), range(2), range(2))
+ (1, 0, 0, 0)
+
+ ``IndexError`` will be raised if the given *index* is invalid.
+ """
+ pools = list(map(tuple, reversed(args)))
+ ns = list(map(len, pools))
+
+ c = reduce(mul, ns)
+
+ if index < 0:
+ index += c
+
+ if not 0 <= index < c:
+ raise IndexError
+
+ result = []
+ for pool, n in zip(pools, ns):
+ result.append(pool[index % n])
+ index //= n
+
+ return tuple(reversed(result))
+
+
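+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# nth_product decodes *index* as a mixed-radix number (one digit per pool),
+# matching the lexicographic order produced by itertools.product.
+def _demo_nth_product_matches_product():
+    from itertools import product
+
+    pools = (range(2), range(3), range(2))
+    for index, item in enumerate(product(*pools)):
+        assert nth_product(index, *pools) == item
+    return True
+
+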
+def nth_permutation(iterable, r, index):
+ """Equivalent to ``list(permutations(iterable, r))[index]```
+
+ The subsequences of *iterable* that are of length *r* where order is
+ important can be ordered lexicographically. :func:`nth_permutation`
+ computes the subsequence at sort position *index* directly, without
+ computing the previous subsequences.
+
+ >>> nth_permutation('ghijk', 2, 5)
+ ('h', 'i')
+
+    ``ValueError`` will be raised if *r* is negative or greater than the length
+ of *iterable*.
+ ``IndexError`` will be raised if the given *index* is invalid.
+ """
+ pool = list(iterable)
+ n = len(pool)
+
+ if r is None or r == n:
+ r, c = n, factorial(n)
+ elif not 0 <= r < n:
+ raise ValueError
+ else:
+ c = perm(n, r)
+
+ if index < 0:
+ index += c
+
+ if not 0 <= index < c:
+ raise IndexError
+
+ if c == 0:
+ return tuple()
+
+ result = [0] * r
+ q = index * factorial(n) // c if r < n else index
+ for d in range(1, n + 1):
+ q, i = divmod(q, d)
+ if 0 <= n - d < r:
+ result[n - d] = i
+ if q == 0:
+ break
+
+ return tuple(map(pool.pop, result))
+
+
+def nth_combination_with_replacement(iterable, r, index):
+ """Equivalent to
+ ``list(combinations_with_replacement(iterable, r))[index]``.
+
+
+ The subsequences with repetition of *iterable* that are of length *r* can
+ be ordered lexicographically. :func:`nth_combination_with_replacement`
+ computes the subsequence at sort position *index* directly, without
+ computing the previous subsequences with replacement.
+
+ >>> nth_combination_with_replacement(range(5), 3, 5)
+ (0, 1, 1)
+
+    ``ValueError`` will be raised if *r* is negative or greater than the length
+ of *iterable*.
+ ``IndexError`` will be raised if the given *index* is invalid.
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ if (r < 0) or (r > n):
+ raise ValueError
+
+ c = comb(n + r - 1, r)
+
+ if index < 0:
+ index += c
+
+ if (index < 0) or (index >= c):
+ raise IndexError
+
+ result = []
+ i = 0
+ while r:
+ r -= 1
+ while n >= 0:
+ num_combs = comb(n + r - 1, r)
+ if index < num_combs:
+ break
+ n -= 1
+ i += 1
+ index -= num_combs
+ result.append(pool[i])
+
+ return tuple(result)
+
+
+def value_chain(*args):
+ """Yield all arguments passed to the function in the same order in which
+ they were passed. If an argument itself is iterable then iterate over its
+ values.
+
+ >>> list(value_chain(1, 2, 3, [4, 5, 6]))
+ [1, 2, 3, 4, 5, 6]
+
+ Binary and text strings are not considered iterable and are emitted
+ as-is:
+
+ >>> list(value_chain('12', '34', ['56', '78']))
+ ['12', '34', '56', '78']
+
+
+ Multiple levels of nesting are not flattened.
+
+ """
+ for value in args:
+ if isinstance(value, (str, bytes)):
+ yield value
+ continue
+ try:
+ yield from value
+ except TypeError:
+ yield value
+
+
+def product_index(element, *args):
+ """Equivalent to ``list(product(*args)).index(element)``
+
+ The products of *args* can be ordered lexicographically.
+ :func:`product_index` computes the first index of *element* without
+ computing the previous products.
+
+ >>> product_index([8, 2], range(10), range(5))
+ 42
+
+ ``ValueError`` will be raised if the given *element* isn't in the product
+ of *args*.
+ """
+ index = 0
+
+ for x, pool in zip_longest(element, args, fillvalue=_marker):
+ if x is _marker or pool is _marker:
+ raise ValueError('element is not a product of args')
+
+ pool = tuple(pool)
+ index = index * len(pool) + pool.index(x)
+
+ return index
+
+
+def combination_index(element, iterable):
+ """Equivalent to ``list(combinations(iterable, r)).index(element)``
+
+ The subsequences of *iterable* that are of length *r* can be ordered
+ lexicographically. :func:`combination_index` computes the index of the
+ first *element*, without computing the previous combinations.
+
+ >>> combination_index('adf', 'abcdefg')
+ 10
+
+ ``ValueError`` will be raised if the given *element* isn't one of the
+ combinations of *iterable*.
+ """
+ element = enumerate(element)
+ k, y = next(element, (None, None))
+ if k is None:
+ return 0
+
+ indexes = []
+ pool = enumerate(iterable)
+ for n, x in pool:
+ if x == y:
+ indexes.append(n)
+ tmp, y = next(element, (None, None))
+ if tmp is None:
+ break
+ else:
+ k = tmp
+ else:
+ raise ValueError('element is not a combination of iterable')
+
+ n, _ = last(pool, default=(n, None))
+
+    # Compute the index using a sum of binomial coefficients
+ index = 1
+ for i, j in enumerate(reversed(indexes), start=1):
+ j = n - j
+ if i <= j:
+ index += comb(j, i)
+
+ return comb(n + 1, k + 1) - index
+
+
+def combination_with_replacement_index(element, iterable):
+ """Equivalent to
+ ``list(combinations_with_replacement(iterable, r)).index(element)``
+
+ The subsequences with repetition of *iterable* that are of length *r* can
+ be ordered lexicographically. :func:`combination_with_replacement_index`
+ computes the index of the first *element*, without computing the previous
+ combinations with replacement.
+
+ >>> combination_with_replacement_index('adf', 'abcdefg')
+ 20
+
+ ``ValueError`` will be raised if the given *element* isn't one of the
+ combinations with replacement of *iterable*.
+ """
+ element = tuple(element)
+ l = len(element)
+ element = enumerate(element)
+
+ k, y = next(element, (None, None))
+ if k is None:
+ return 0
+
+ indexes = []
+ pool = tuple(iterable)
+ for n, x in enumerate(pool):
+ while x == y:
+ indexes.append(n)
+ tmp, y = next(element, (None, None))
+ if tmp is None:
+ break
+ else:
+ k = tmp
+ if y is None:
+ break
+ else:
+ raise ValueError(
+ 'element is not a combination with replacement of iterable'
+ )
+
+ n = len(pool)
+ occupations = [0] * n
+ for p in indexes:
+ occupations[p] += 1
+
+ index = 0
+ cumulative_sum = 0
+ for k in range(1, n):
+ cumulative_sum += occupations[k - 1]
+ j = l + n - 1 - k - cumulative_sum
+ i = n - k
+ if i <= j:
+ index += comb(j, i)
+
+ return index
+
+
+def permutation_index(element, iterable):
+ """Equivalent to ``list(permutations(iterable, r)).index(element)```
+
+ The subsequences of *iterable* that are of length *r* where order is
+ important can be ordered lexicographically. :func:`permutation_index`
+ computes the index of the first *element* directly, without computing
+ the previous permutations.
+
+ >>> permutation_index([1, 3, 2], range(5))
+ 19
+
+ ``ValueError`` will be raised if the given *element* isn't one of the
+ permutations of *iterable*.
+ """
+ index = 0
+ pool = list(iterable)
+ for i, x in zip(range(len(pool), -1, -1), element):
+ r = pool.index(x)
+ index = index * i + r
+ del pool[r]
+
+ return index
+
+
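+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# the *_index functions above invert the corresponding nth_* functions
+# earlier in this module, so encoding and decoding an element round-trips.
+def _demo_index_round_trips():
+    args = (range(5), range(3))
+    assert nth_product(product_index((3, 1), *args), *args) == (3, 1)
+
+    index = permutation_index([1, 3, 2], range(5))
+    assert nth_permutation(range(5), 3, index) == (1, 3, 2)
+    return True
+
+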
+class countable:
+ """Wrap *iterable* and keep a count of how many items have been consumed.
+
+ The ``items_seen`` attribute starts at ``0`` and increments as the iterable
+ is consumed:
+
+ >>> iterable = map(str, range(10))
+ >>> it = countable(iterable)
+ >>> it.items_seen
+ 0
+ >>> next(it), next(it)
+ ('0', '1')
+ >>> list(it)
+ ['2', '3', '4', '5', '6', '7', '8', '9']
+ >>> it.items_seen
+ 10
+ """
+
+ def __init__(self, iterable):
+ self._it = iter(iterable)
+ self.items_seen = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ item = next(self._it)
+ self.items_seen += 1
+
+ return item
+
+
+def chunked_even(iterable, n):
+ """Break *iterable* into lists of approximately length *n*.
+    Items are distributed such that the lengths of the lists differ by at most
+ 1 item.
+
+ >>> iterable = [1, 2, 3, 4, 5, 6, 7]
+ >>> n = 3
+ >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2
+ [[1, 2, 3], [4, 5], [6, 7]]
+ >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1
+ [[1, 2, 3], [4, 5, 6], [7]]
+
+ """
+
+ len_method = getattr(iterable, '__len__', None)
+
+ if len_method is None:
+ return _chunked_even_online(iterable, n)
+ else:
+ return _chunked_even_finite(iterable, len_method(), n)
+
+
+def _chunked_even_online(iterable, n):
+ buffer = []
+ maxbuf = n + (n - 2) * (n - 1)
+ for x in iterable:
+ buffer.append(x)
+ if len(buffer) == maxbuf:
+ yield buffer[:n]
+ buffer = buffer[n:]
+ yield from _chunked_even_finite(buffer, len(buffer), n)
+
+
+def _chunked_even_finite(iterable, N, n):
+ if N < 1:
+ return
+
+ # Lists are either size `full_size <= n` or `partial_size = full_size - 1`
+ q, r = divmod(N, n)
+ num_lists = q + (1 if r > 0 else 0)
+ q, r = divmod(N, num_lists)
+ full_size = q + (1 if r > 0 else 0)
+ partial_size = full_size - 1
+ num_full = N - partial_size * num_lists
+ num_partial = num_lists - num_full
+
+ # Yield num_full lists of full_size
+ partial_start_idx = num_full * full_size
+ if full_size > 0:
+ for i in range(0, partial_start_idx, full_size):
+ yield list(islice(iterable, i, i + full_size))
+
+ # Yield num_partial lists of partial_size
+ if partial_size > 0:
+ for i in range(
+ partial_start_idx,
+ partial_start_idx + (num_partial * partial_size),
+ partial_size,
+ ):
+ yield list(islice(iterable, i, i + partial_size))
+
+
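+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# chunked_even preserves order and produces chunk lengths that differ by at
+# most one, whether the input reports a length or not.
+def _demo_chunked_even_invariants():
+    for total in range(1, 12):
+        data = list(range(total))
+        for source in (data, iter(data)):  # sized and unsized inputs
+            chunks = list(chunked_even(source, 4))
+            assert [x for chunk in chunks for x in chunk] == data
+            lengths = set(map(len, chunks))
+            assert max(lengths) - min(lengths) <= 1
+    return True
+
+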
+def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
+ """A version of :func:`zip` that "broadcasts" any scalar
+ (i.e., non-iterable) items into output tuples.
+
+ >>> iterable_1 = [1, 2, 3]
+ >>> iterable_2 = ['a', 'b', 'c']
+ >>> scalar = '_'
+ >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
+ [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
+
+ The *scalar_types* keyword argument determines what types are considered
+ scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
+ treat strings and byte strings as iterable:
+
+ >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
+ [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
+
+ If the *strict* keyword argument is ``True``, then
+ ``UnequalIterablesError`` will be raised if any of the iterables have
+ different lengths.
+ """
+
+ def is_scalar(obj):
+ if scalar_types and isinstance(obj, scalar_types):
+ return True
+ try:
+ iter(obj)
+ except TypeError:
+ return True
+ else:
+ return False
+
+ size = len(objects)
+ if not size:
+ return
+
+ new_item = [None] * size
+ iterables, iterable_positions = [], []
+ for i, obj in enumerate(objects):
+ if is_scalar(obj):
+ new_item[i] = obj
+ else:
+ iterables.append(iter(obj))
+ iterable_positions.append(i)
+
+ if not iterables:
+ yield tuple(objects)
+ return
+
+ zipper = _zip_equal if strict else zip
+ for item in zipper(*iterables):
+ for i, new_item[i] in zip(iterable_positions, item):
+ pass
+ yield tuple(new_item)
+
+
+def unique_in_window(iterable, n, key=None):
+ """Yield the items from *iterable* that haven't been seen recently.
+ *n* is the size of the lookback window.
+
+ >>> iterable = [0, 1, 0, 2, 3, 0]
+ >>> n = 3
+ >>> list(unique_in_window(iterable, n))
+ [0, 1, 2, 3, 0]
+
+ The *key* function, if provided, will be used to determine uniqueness:
+
+ >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
+ ['a', 'b', 'c', 'd', 'a']
+
+ The items in *iterable* must be hashable.
+
+ """
+ if n <= 0:
+ raise ValueError('n must be greater than 0')
+
+ window = deque(maxlen=n)
+ counts = defaultdict(int)
+ use_key = key is not None
+
+ for item in iterable:
+ if len(window) == n:
+ to_discard = window[0]
+ if counts[to_discard] == 1:
+ del counts[to_discard]
+ else:
+ counts[to_discard] -= 1
+
+ k = key(item) if use_key else item
+ if k not in counts:
+ yield item
+ counts[k] += 1
+ window.append(k)
+
+
+def duplicates_everseen(iterable, key=None):
+ """Yield duplicate elements after their first appearance.
+
+ >>> list(duplicates_everseen('mississippi'))
+ ['s', 'i', 's', 's', 'i', 'p', 'i']
+ >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
+ ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
+
+ This function is analogous to :func:`unique_everseen` and is subject to
+ the same performance considerations.
+
+ """
+ seen_set = set()
+ seen_list = []
+ use_key = key is not None
+
+ for element in iterable:
+ k = key(element) if use_key else element
+ try:
+ if k not in seen_set:
+ seen_set.add(k)
+ else:
+ yield element
+ except TypeError:
+ if k not in seen_list:
+ seen_list.append(k)
+ else:
+ yield element
+
+
+def duplicates_justseen(iterable, key=None):
+ """Yields serially-duplicate elements after their first appearance.
+
+ >>> list(duplicates_justseen('mississippi'))
+ ['s', 's', 'p']
+ >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
+ ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
+
+ This function is analogous to :func:`unique_justseen`.
+
+ """
+ return flatten(g for _, g in groupby(iterable, key) for _ in g)
+
+
+def classify_unique(iterable, key=None):
+ """Classify each element in terms of its uniqueness.
+
+ For each element in the input iterable, return a 3-tuple consisting of:
+
+ 1. The element itself
+ 2. ``False`` if the element is equal to the one preceding it in the input,
+ ``True`` otherwise (i.e. the equivalent of :func:`unique_justseen`)
+ 3. ``False`` if this element has been seen anywhere in the input before,
+ ``True`` otherwise (i.e. the equivalent of :func:`unique_everseen`)
+
+ >>> list(classify_unique('otto')) # doctest: +NORMALIZE_WHITESPACE
+ [('o', True, True),
+ ('t', True, True),
+ ('t', False, False),
+ ('o', True, False)]
+
+ This function is analogous to :func:`unique_everseen` and is subject to
+ the same performance considerations.
+
+ """
+ seen_set = set()
+ seen_list = []
+ use_key = key is not None
+ previous = None
+
+ for i, element in enumerate(iterable):
+ k = key(element) if use_key else element
+ is_unique_justseen = not i or previous != k
+ previous = k
+ is_unique_everseen = False
+ try:
+ if k not in seen_set:
+ seen_set.add(k)
+ is_unique_everseen = True
+ except TypeError:
+ if k not in seen_list:
+ seen_list.append(k)
+ is_unique_everseen = True
+ yield element, is_unique_justseen, is_unique_everseen
+
+
+def minmax(iterable_or_value, *others, key=None, default=_marker):
+ """Returns both the smallest and largest items in an iterable
+ or the largest of two or more arguments.
+
+ >>> minmax([3, 1, 5])
+ (1, 5)
+
+ >>> minmax(4, 2, 6)
+ (2, 6)
+
+ If a *key* function is provided, it will be used to transform the input
+ items for comparison.
+
+ >>> minmax([5, 30], key=str) # '30' sorts before '5'
+ (30, 5)
+
+ If a *default* value is provided, it will be returned if there are no
+ input items.
+
+ >>> minmax([], default=(0, 0))
+ (0, 0)
+
+ Otherwise ``ValueError`` is raised.
+
+    This function is based on a recipe by Raymond Hettinger and takes care
+    to minimize the number of comparisons performed.
+ """
+ iterable = (iterable_or_value, *others) if others else iterable_or_value
+
+ it = iter(iterable)
+
+ try:
+ lo = hi = next(it)
+ except StopIteration as e:
+ if default is _marker:
+ raise ValueError(
+ '`minmax()` argument is an empty iterable. '
+ 'Provide a `default` value to suppress this error.'
+ ) from e
+ return default
+
+ # Different branches depending on the presence of key. This saves a lot
+ # of unimportant copies which would slow the "key=None" branch
+ # significantly down.
+ if key is None:
+ for x, y in zip_longest(it, it, fillvalue=lo):
+ if y < x:
+ x, y = y, x
+ if x < lo:
+ lo = x
+ if hi < y:
+ hi = y
+
+ else:
+ lo_key = hi_key = key(lo)
+
+ for x, y in zip_longest(it, it, fillvalue=lo):
+ x_key, y_key = key(x), key(y)
+
+ if y_key < x_key:
+ x, y, x_key, y_key = y, x, y_key, x_key
+ if x_key < lo_key:
+ lo, lo_key = x, x_key
+ if hi_key < y_key:
+ hi, hi_key = y, y_key
+
+ return lo, hi
+
+
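+# Illustrative sketch (a hypothetical helper, not part of the public API):
+# despite the pairwise trick above (roughly 3 comparisons per 2 items
+# instead of 4), minmax matches the builtin min()/max() pair.
+def _demo_minmax_matches_builtins():
+    for data in ([3, 1, 5], [2], [5, 30, 7, 30, 5], list(range(6))):
+        assert minmax(data) == (min(data), max(data))
+    assert minmax(4, 2, 6) == (2, 6)
+    return True
+
+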
+def constrained_batches(
+ iterable, max_size, max_count=None, get_len=len, strict=True
+):
+ """Yield batches of items from *iterable* with a combined size limited by
+ *max_size*.
+
+ >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
+ >>> list(constrained_batches(iterable, 10))
+ [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]
+
+ If a *max_count* is supplied, the number of items per batch is also
+ limited:
+
+ >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
+ >>> list(constrained_batches(iterable, 10, max_count = 2))
+ [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]
+
+ If a *get_len* function is supplied, use that instead of :func:`len` to
+ determine item size.
+
+ If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
+ than *max_size*. Otherwise, allow single items to exceed *max_size*.
+ """
+ if max_size <= 0:
+ raise ValueError('maximum size must be greater than zero')
+
+ batch = []
+ batch_size = 0
+ batch_count = 0
+ for item in iterable:
+ item_len = get_len(item)
+ if strict and item_len > max_size:
+ raise ValueError('item size exceeds maximum size')
+
+ reached_count = batch_count == max_count
+ reached_size = item_len + batch_size > max_size
+ if batch_count and (reached_size or reached_count):
+ yield tuple(batch)
+ batch.clear()
+ batch_size = 0
+ batch_count = 0
+
+ batch.append(item)
+ batch_size += item_len
+ batch_count += 1
+
+ if batch:
+ yield tuple(batch)
+
+
+def gray_product(*iterables):
+ """Like :func:`itertools.product`, but return tuples in an order such
+ that only one element in the generated tuple changes from one iteration
+ to the next.
+
+ >>> list(gray_product('AB','CD'))
+ [('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]
+
+ This function consumes all of the input iterables before producing output.
+ If any of the input iterables have fewer than two items, ``ValueError``
+ is raised.
+
+    For information on the algorithm, see section 7.2.1.1 of Donald Knuth's
+    *The Art of Computer Programming*.
+ """
+ all_iterables = tuple(tuple(x) for x in iterables)
+ iterable_count = len(all_iterables)
+ for iterable in all_iterables:
+ if len(iterable) < 2:
+ raise ValueError("each iterable must have two or more items")
+
+ # This is based on "Algorithm H" from section 7.2.1.1, page 20.
+ # a holds the indexes of the source iterables for the n-tuple to be yielded
+ # f is the array of "focus pointers"
+ # o is the array of "directions"
+ a = [0] * iterable_count
+ f = list(range(iterable_count + 1))
+ o = [1] * iterable_count
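+ # On each pass, f[0] names the coordinate to adjust next and o[j] gives its
+ # direction of travel; the focus pointers are updated whenever a coordinate
+ # reaches either end of its iterable. j == iterable_count signals that
+ # every tuple has been produced.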
+ while True:
+ yield tuple(all_iterables[i][a[i]] for i in range(iterable_count))
+ j = f[0]
+ f[0] = 0
+ if j == iterable_count:
+ break
+ a[j] = a[j] + o[j]
+ if a[j] == 0 or a[j] == len(all_iterables[j]) - 1:
+ o[j] = -o[j]
+ f[j] = f[j + 1]
+ f[j + 1] = j + 1
+
+
+def partial_product(*iterables):
+ """Yields tuples containing one item from each iterator, with subsequent
+ tuples changing a single item at a time by advancing each iterator until it
+ is exhausted. This sequence guarantees every value in each iterable is
+ output at least once without generating all possible combinations.
+
+ This may be useful, for example, when testing an expensive function.
+
+ >>> list(partial_product('AB', 'C', 'DEF'))
+ [('A', 'C', 'D'), ('B', 'C', 'D'), ('B', 'C', 'E'), ('B', 'C', 'F')]
+ """
+
+ iterators = list(map(iter, iterables))
+
+ try:
+ prod = [next(it) for it in iterators]
+ except StopIteration:
+ return
+ yield tuple(prod)
+
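+ # `for prod[i] in it` assigns each remaining value directly into slot i,
+ # so each yielded tuple differs from the previous one in a single position.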
+ for i, it in enumerate(iterators):
+ for prod[i] in it:
+ yield tuple(prod)
+
+
+def takewhile_inclusive(predicate, iterable):
+ """A variant of :func:`takewhile` that yields one additional element.
+
+ >>> list(takewhile_inclusive(lambda x: x < 5, [1, 4, 6, 4, 1]))
+ [1, 4, 6]
+
+ :func:`takewhile` would return ``[1, 4]``.
+ """
+ for x in iterable:
+ yield x
+ if not predicate(x):
+ break
+
+
+def outer_product(func, xs, ys, *args, **kwargs):
+ """A generalized outer product that applies a binary function to all
+ pairs of items. Returns a 2D matrix with ``len(xs)`` rows and ``len(ys)``
+ columns.
+ Also accepts ``*args`` and ``**kwargs`` that are passed to ``func``.
+
+ Multiplication table:
+
+ >>> list(outer_product(mul, range(1, 4), range(1, 6)))
+ [(1, 2, 3, 4, 5), (2, 4, 6, 8, 10), (3, 6, 9, 12, 15)]
+
+ Cross tabulation:
+
+ >>> xs = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
+ >>> ys = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
+ >>> rows = list(zip(xs, ys))
+ >>> count_rows = lambda x, y: rows.count((x, y))
+ >>> list(outer_product(count_rows, sorted(set(xs)), sorted(set(ys))))
+ [(2, 3, 0), (1, 0, 4)]
+
+ Usage with ``*args`` and ``**kwargs``:
+
+ >>> animals = ['cat', 'wolf', 'mouse']
+ >>> list(outer_product(min, animals, animals, key=len))
+ [('cat', 'cat', 'cat'), ('cat', 'wolf', 'wolf'), ('cat', 'wolf', 'mouse')]
+ """
+ ys = tuple(ys)
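+ # product() walks the (x, y) pairs in row-major order, so grouping the
+ # mapped results into chunks of len(ys) yields one row of the matrix per x.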
+ return batched(
+ starmap(lambda x, y: func(x, y, *args, **kwargs), product(xs, ys)),
+ n=len(ys),
+ )
+
+
+def iter_suppress(iterable, *exceptions):
+ """Yield each of the items from *iterable*. If the iteration raises one of
+ the specified *exceptions*, that exception will be suppressed and iteration
+ will stop.
+
+ >>> from itertools import chain
+ >>> def breaks_at_five(x):
+ ... while True:
+ ... if x >= 5:
+ ... raise RuntimeError
+ ... yield x
+ ... x += 1
+ >>> it_1 = iter_suppress(breaks_at_five(1), RuntimeError)
+ >>> it_2 = iter_suppress(breaks_at_five(2), RuntimeError)
+ >>> list(chain(it_1, it_2))
+ [1, 2, 3, 4, 2, 3, 4]
+ """
+ try:
+ yield from iterable
+ except exceptions:
+ return
+
+
+def filter_map(func, iterable):
+ """Apply *func* to every element of *iterable*, yielding only those which
+ are not ``None``.
+
+ >>> elems = ['1', 'a', '2', 'b', '3']
+ >>> list(filter_map(lambda s: int(s) if s.isnumeric() else None, elems))
+ [1, 2, 3]
+ """
+ for x in iterable:
+ y = func(x)
+ if y is not None:
+ yield y
diff --git a/venv/lib/python3.10/site-packages/more_itertools/more.pyi b/venv/lib/python3.10/site-packages/more_itertools/more.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..9a5fc911a3ed7249b95cbda2433924b6aced2ae7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/more_itertools/more.pyi
@@ -0,0 +1,695 @@
+"""Stubs for more_itertools.more"""
+from __future__ import annotations
+
+from types import TracebackType
+from typing import (
+ Any,
+ Callable,
+ Container,
+ ContextManager,
+ Generic,
+ Hashable,
+ Iterable,
+ Iterator,
+ overload,
+ Reversible,
+ Sequence,
+ Sized,
+ Type,
+ TypeVar,
+ type_check_only,
+)
+from typing_extensions import Protocol
+
+# Type and type variable definitions
+_T = TypeVar('_T')
+_T1 = TypeVar('_T1')
+_T2 = TypeVar('_T2')
+_U = TypeVar('_U')
+_V = TypeVar('_V')
+_W = TypeVar('_W')
+_T_co = TypeVar('_T_co', covariant=True)
+_GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[Any]])
+_Raisable = BaseException | Type[BaseException]
+
+@type_check_only
+class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ...
+
+@type_check_only
+class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ...
+
+@type_check_only
+class _SupportsSlicing(Protocol[_T_co]):
+ def __getitem__(self, __k: slice) -> _T_co: ...
+
+def chunked(
+ iterable: Iterable[_T], n: int | None, strict: bool = ...
+) -> Iterator[list[_T]]: ...
+@overload
+def first(iterable: Iterable[_T]) -> _T: ...
+@overload
+def first(iterable: Iterable[_T], default: _U) -> _T | _U: ...
+@overload
+def last(iterable: Iterable[_T]) -> _T: ...
+@overload
+def last(iterable: Iterable[_T], default: _U) -> _T | _U: ...
+@overload
+def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ...
+@overload
+def nth_or_last(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
+
+class peekable(Generic[_T], Iterator[_T]):
+ def __init__(self, iterable: Iterable[_T]) -> None: ...
+ def __iter__(self) -> peekable[_T]: ...
+ def __bool__(self) -> bool: ...
+ @overload
+ def peek(self) -> _T: ...
+ @overload
+ def peek(self, default: _U) -> _T | _U: ...
+ def prepend(self, *items: _T) -> None: ...
+ def __next__(self) -> _T: ...
+ @overload
+ def __getitem__(self, index: int) -> _T: ...
+ @overload
+ def __getitem__(self, index: slice) -> list[_T]: ...
+
+def consumer(func: _GenFn) -> _GenFn: ...
+def ilen(iterable: Iterable[_T]) -> int: ...
+def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ...
+def with_iter(
+ context_manager: ContextManager[Iterable[_T]],
+) -> Iterator[_T]: ...
+def one(
+ iterable: Iterable[_T],
+ too_short: _Raisable | None = ...,
+ too_long: _Raisable | None = ...,
+) -> _T: ...
+def raise_(exception: _Raisable, *args: Any) -> None: ...
+def strictly_n(
+ iterable: Iterable[_T],
+ n: int,
+ too_short: _GenFn | None = ...,
+ too_long: _GenFn | None = ...,
+) -> list[_T]: ...
+def distinct_permutations(
+ iterable: Iterable[_T], r: int | None = ...
+) -> Iterator[tuple[_T, ...]]: ...
+def intersperse(
+ e: _U, iterable: Iterable[_T], n: int = ...
+) -> Iterator[_T | _U]: ...
+def unique_to_each(*iterables: Iterable[_T]) -> list[list[_T]]: ...
+@overload
+def windowed(
+ seq: Iterable[_T], n: int, *, step: int = ...
+) -> Iterator[tuple[_T | None, ...]]: ...
+@overload
+def windowed(
+ seq: Iterable[_T], n: int, fillvalue: _U, step: int = ...
+) -> Iterator[tuple[_T | _U, ...]]: ...
+def substrings(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
+def substrings_indexes(
+ seq: Sequence[_T], reverse: bool = ...
+) -> Iterator[tuple[Sequence[_T], int, int]]: ...
+
+class bucket(Generic[_T, _U], Container[_U]):
+ def __init__(
+ self,
+ iterable: Iterable[_T],
+ key: Callable[[_T], _U],
+ validator: Callable[[_U], object] | None = ...,
+ ) -> None: ...
+ def __contains__(self, value: object) -> bool: ...
+ def __iter__(self) -> Iterator[_U]: ...
+ def __getitem__(self, value: object) -> Iterator[_T]: ...
+
+def spy(
+ iterable: Iterable[_T], n: int = ...
+) -> tuple[list[_T], Iterator[_T]]: ...
+def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ...
+def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ...
+def interleave_evenly(
+ iterables: list[Iterable[_T]], lengths: list[int] | None = ...
+) -> Iterator[_T]: ...
+def collapse(
+ iterable: Iterable[Any],
+ base_type: type | None = ...,
+ levels: int | None = ...,
+) -> Iterator[Any]: ...
+@overload
+def side_effect(
+ func: Callable[[_T], object],
+ iterable: Iterable[_T],
+ chunk_size: None = ...,
+ before: Callable[[], object] | None = ...,
+ after: Callable[[], object] | None = ...,
+) -> Iterator[_T]: ...
+@overload
+def side_effect(
+ func: Callable[[list[_T]], object],
+ iterable: Iterable[_T],
+ chunk_size: int,
+ before: Callable[[], object] | None = ...,
+ after: Callable[[], object] | None = ...,
+) -> Iterator[_T]: ...
+def sliced(
+ seq: _SupportsSlicing[_T], n: int, strict: bool = ...
+) -> Iterator[_T]: ...
+def split_at(
+ iterable: Iterable[_T],
+ pred: Callable[[_T], object],
+ maxsplit: int = ...,
+ keep_separator: bool = ...,
+) -> Iterator[list[_T]]: ...
+def split_before(
+ iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
+) -> Iterator[list[_T]]: ...
+def split_after(
+ iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
+) -> Iterator[list[_T]]: ...
+def split_when(
+ iterable: Iterable[_T],
+ pred: Callable[[_T, _T], object],
+ maxsplit: int = ...,
+) -> Iterator[list[_T]]: ...
+def split_into(
+ iterable: Iterable[_T], sizes: Iterable[int | None]
+) -> Iterator[list[_T]]: ...
+@overload
+def padded(
+ iterable: Iterable[_T],
+ *,
+ n: int | None = ...,
+ next_multiple: bool = ...,
+) -> Iterator[_T | None]: ...
+@overload
+def padded(
+ iterable: Iterable[_T],
+ fillvalue: _U,
+ n: int | None = ...,
+ next_multiple: bool = ...,
+) -> Iterator[_T | _U]: ...
+@overload
+def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ...
+@overload
+def repeat_last(iterable: Iterable[_T], default: _U) -> Iterator[_T | _U]: ...
+def distribute(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
+@overload
+def stagger(
+ iterable: Iterable[_T],
+ offsets: _SizedIterable[int] = ...,
+ longest: bool = ...,
+) -> Iterator[tuple[_T | None, ...]]: ...
+@overload
+def stagger(
+ iterable: Iterable[_T],
+ offsets: _SizedIterable[int] = ...,
+ longest: bool = ...,
+ fillvalue: _U = ...,
+) -> Iterator[tuple[_T | _U, ...]]: ...
+
+class UnequalIterablesError(ValueError):
+ def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ...
+
+@overload
+def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ...
+@overload
+def zip_equal(
+ __iter1: Iterable[_T1], __iter2: Iterable[_T2]
+) -> Iterator[tuple[_T1, _T2]]: ...
+@overload
+def zip_equal(
+ __iter1: Iterable[_T],
+ __iter2: Iterable[_T],
+ __iter3: Iterable[_T],
+ *iterables: Iterable[_T],
+) -> Iterator[tuple[_T, ...]]: ...
+@overload
+def zip_offset(
+ __iter1: Iterable[_T1],
+ *,
+ offsets: _SizedIterable[int],
+ longest: bool = ...,
+ fillvalue: None = None,
+) -> Iterator[tuple[_T1 | None]]: ...
+@overload
+def zip_offset(
+ __iter1: Iterable[_T1],
+ __iter2: Iterable[_T2],
+ *,
+ offsets: _SizedIterable[int],
+ longest: bool = ...,
+ fillvalue: None = None,
+) -> Iterator[tuple[_T1 | None, _T2 | None]]: ...
+@overload
+def zip_offset(
+ __iter1: Iterable[_T],
+ __iter2: Iterable[_T],
+ __iter3: Iterable[_T],
+ *iterables: Iterable[_T],
+ offsets: _SizedIterable[int],
+ longest: bool = ...,
+ fillvalue: None = None,
+) -> Iterator[tuple[_T | None, ...]]: ...
+@overload
+def zip_offset(
+ __iter1: Iterable[_T1],
+ *,
+ offsets: _SizedIterable[int],
+ longest: bool = ...,
+ fillvalue: _U,
+) -> Iterator[tuple[_T1 | _U]]: ...
+@overload
+def zip_offset(
+ __iter1: Iterable[_T1],
+ __iter2: Iterable[_T2],
+ *,
+ offsets: _SizedIterable[int],
+ longest: bool = ...,
+ fillvalue: _U,
+) -> Iterator[tuple[_T1 | _U, _T2 | _U]]: ...
+@overload
+def zip_offset(
+ __iter1: Iterable[_T],
+ __iter2: Iterable[_T],
+ __iter3: Iterable[_T],
+ *iterables: Iterable[_T],
+ offsets: _SizedIterable[int],
+ longest: bool = ...,
+ fillvalue: _U,
+) -> Iterator[tuple[_T | _U, ...]]: ...
+def sort_together(
+ iterables: Iterable[Iterable[_T]],
+ key_list: Iterable[int] = ...,
+ key: Callable[..., Any] | None = ...,
+ reverse: bool = ...,
+) -> list[tuple[_T, ...]]: ...
+def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ...
+def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
+def always_iterable(
+ obj: object,
+ base_type: type | tuple[type | tuple[Any, ...], ...] | None = ...,
+) -> Iterator[Any]: ...
+def adjacent(
+ predicate: Callable[[_T], bool],
+ iterable: Iterable[_T],
+ distance: int = ...,
+) -> Iterator[tuple[bool, _T]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: None = None,
+ valuefunc: None = None,
+ reducefunc: None = None,
+) -> Iterator[tuple[_T, Iterator[_T]]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: None,
+ reducefunc: None,
+) -> Iterator[tuple[_U, Iterator[_T]]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: None,
+ valuefunc: Callable[[_T], _V],
+ reducefunc: None,
+) -> Iterable[tuple[_T, Iterable[_V]]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: Callable[[_T], _V],
+ reducefunc: None,
+) -> Iterable[tuple[_U, Iterator[_V]]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: None,
+ valuefunc: None,
+ reducefunc: Callable[[Iterator[_T]], _W],
+) -> Iterable[tuple[_T, _W]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: None,
+ reducefunc: Callable[[Iterator[_T]], _W],
+) -> Iterable[tuple[_U, _W]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: None,
+ valuefunc: Callable[[_T], _V],
+ reducefunc: Callable[[Iterable[_V]], _W],
+) -> Iterable[tuple[_T, _W]]: ...
+@overload
+def groupby_transform(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: Callable[[_T], _V],
+ reducefunc: Callable[[Iterable[_V]], _W],
+) -> Iterable[tuple[_U, _W]]: ...
+
+class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
+ @overload
+ def __init__(self, __stop: _T) -> None: ...
+ @overload
+ def __init__(self, __start: _T, __stop: _T) -> None: ...
+ @overload
+ def __init__(self, __start: _T, __stop: _T, __step: _U) -> None: ...
+ def __bool__(self) -> bool: ...
+ def __contains__(self, elem: object) -> bool: ...
+ def __eq__(self, other: object) -> bool: ...
+ @overload
+ def __getitem__(self, key: int) -> _T: ...
+ @overload
+ def __getitem__(self, key: slice) -> numeric_range[_T, _U]: ...
+ def __hash__(self) -> int: ...
+ def __iter__(self) -> Iterator[_T]: ...
+ def __len__(self) -> int: ...
+ def __reduce__(
+ self,
+ ) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ...
+ def __repr__(self) -> str: ...
+ def __reversed__(self) -> Iterator[_T]: ...
+ def count(self, value: _T) -> int: ...
+ def index(self, value: _T) -> int: ... # type: ignore
+
+def count_cycle(
+ iterable: Iterable[_T], n: int | None = ...
+) -> Iterable[tuple[int, _T]]: ...
+def mark_ends(
+ iterable: Iterable[_T],
+) -> Iterable[tuple[bool, bool, _T]]: ...
+def locate(
+ iterable: Iterable[_T],
+ pred: Callable[..., Any] = ...,
+ window_size: int | None = ...,
+) -> Iterator[int]: ...
+def lstrip(
+ iterable: Iterable[_T], pred: Callable[[_T], object]
+) -> Iterator[_T]: ...
+def rstrip(
+ iterable: Iterable[_T], pred: Callable[[_T], object]
+) -> Iterator[_T]: ...
+def strip(
+ iterable: Iterable[_T], pred: Callable[[_T], object]
+) -> Iterator[_T]: ...
+
+class islice_extended(Generic[_T], Iterator[_T]):
+ def __init__(self, iterable: Iterable[_T], *args: int | None) -> None: ...
+ def __iter__(self) -> islice_extended[_T]: ...
+ def __next__(self) -> _T: ...
+ def __getitem__(self, index: slice) -> islice_extended[_T]: ...
+
+def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ...
+def consecutive_groups(
+ iterable: Iterable[_T], ordering: Callable[[_T], int] = ...
+) -> Iterator[Iterator[_T]]: ...
+@overload
+def difference(
+ iterable: Iterable[_T],
+ func: Callable[[_T, _T], _U] = ...,
+ *,
+ initial: None = ...,
+) -> Iterator[_T | _U]: ...
+@overload
+def difference(
+ iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U
+) -> Iterator[_U]: ...
+
+class SequenceView(Generic[_T], Sequence[_T]):
+ def __init__(self, target: Sequence[_T]) -> None: ...
+ @overload
+ def __getitem__(self, index: int) -> _T: ...
+ @overload
+ def __getitem__(self, index: slice) -> Sequence[_T]: ...
+ def __len__(self) -> int: ...
+
+class seekable(Generic[_T], Iterator[_T]):
+ def __init__(
+ self, iterable: Iterable[_T], maxlen: int | None = ...
+ ) -> None: ...
+ def __iter__(self) -> seekable[_T]: ...
+ def __next__(self) -> _T: ...
+ def __bool__(self) -> bool: ...
+ @overload
+ def peek(self) -> _T: ...
+ @overload
+ def peek(self, default: _U) -> _T | _U: ...
+ def elements(self) -> SequenceView[_T]: ...
+ def seek(self, index: int) -> None: ...
+ def relative_seek(self, count: int) -> None: ...
+
+class run_length:
+ @staticmethod
+ def encode(iterable: Iterable[_T]) -> Iterator[tuple[_T, int]]: ...
+ @staticmethod
+ def decode(iterable: Iterable[tuple[_T, int]]) -> Iterator[_T]: ...
+
+def exactly_n(
+ iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ...
+) -> bool: ...
+def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ...
+def make_decorator(
+ wrapping_func: Callable[..., _U], result_index: int = ...
+) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ...
+@overload
+def map_reduce(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: None = ...,
+ reducefunc: None = ...,
+) -> dict[_U, list[_T]]: ...
+@overload
+def map_reduce(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: Callable[[_T], _V],
+ reducefunc: None = ...,
+) -> dict[_U, list[_V]]: ...
+@overload
+def map_reduce(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: None = ...,
+ reducefunc: Callable[[list[_T]], _W] = ...,
+) -> dict[_U, _W]: ...
+@overload
+def map_reduce(
+ iterable: Iterable[_T],
+ keyfunc: Callable[[_T], _U],
+ valuefunc: Callable[[_T], _V],
+ reducefunc: Callable[[list[_V]], _W],
+) -> dict[_U, _W]: ...
+def rlocate(
+ iterable: Iterable[_T],
+ pred: Callable[..., object] = ...,
+ window_size: int | None = ...,
+) -> Iterator[int]: ...
+def replace(
+ iterable: Iterable[_T],
+ pred: Callable[..., object],
+ substitutes: Iterable[_U],
+ count: int | None = ...,
+ window_size: int = ...,
+) -> Iterator[_T | _U]: ...
+def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ...
+def set_partitions(
+ iterable: Iterable[_T], k: int | None = ...
+) -> Iterator[list[list[_T]]]: ...
+
+class time_limited(Generic[_T], Iterator[_T]):
+ def __init__(
+ self, limit_seconds: float, iterable: Iterable[_T]
+ ) -> None: ...
+ def __iter__(self) -> islice_extended[_T]: ...
+ def __next__(self) -> _T: ...
+
+@overload
+def only(
+ iterable: Iterable[_T], *, too_long: _Raisable | None = ...
+) -> _T | None: ...
+@overload
+def only(
+ iterable: Iterable[_T], default: _U, too_long: _Raisable | None = ...
+) -> _T | _U: ...
+def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ...
+def distinct_combinations(
+ iterable: Iterable[_T], r: int
+) -> Iterator[tuple[_T, ...]]: ...
+def filter_except(
+ validator: Callable[[Any], object],
+ iterable: Iterable[_T],
+ *exceptions: Type[BaseException],
+) -> Iterator[_T]: ...
+def map_except(
+ function: Callable[[Any], _U],
+ iterable: Iterable[_T],
+ *exceptions: Type[BaseException],
+) -> Iterator[_U]: ...
+def map_if(
+ iterable: Iterable[Any],
+ pred: Callable[[Any], bool],
+ func: Callable[[Any], Any],
+ func_else: Callable[[Any], Any] | None = ...,
+) -> Iterator[Any]: ...
+def sample(
+ iterable: Iterable[_T],
+ k: int,
+ weights: Iterable[float] | None = ...,
+) -> list[_T]: ...
+def is_sorted(
+ iterable: Iterable[_T],
+ key: Callable[[_T], _U] | None = ...,
+ reverse: bool = False,
+ strict: bool = False,
+) -> bool: ...
+
+class AbortThread(BaseException):
+ pass
+
+class callback_iter(Generic[_T], Iterator[_T]):
+ def __init__(
+ self,
+ func: Callable[..., Any],
+ callback_kwd: str = ...,
+ wait_seconds: float = ...,
+ ) -> None: ...
+ def __enter__(self) -> callback_iter[_T]: ...
+ def __exit__(
+ self,
+ exc_type: Type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> bool | None: ...
+ def __iter__(self) -> callback_iter[_T]: ...
+ def __next__(self) -> _T: ...
+ def _reader(self) -> Iterator[_T]: ...
+ @property
+ def done(self) -> bool: ...
+ @property
+ def result(self) -> Any: ...
+
+def windowed_complete(
+ iterable: Iterable[_T], n: int
+) -> Iterator[tuple[_T, ...]]: ...
+def all_unique(
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> bool: ...
+def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ...
+def nth_combination_with_replacement(
+ iterable: Iterable[_T], r: int, index: int
+) -> tuple[_T, ...]: ...
+def nth_permutation(
+ iterable: Iterable[_T], r: int, index: int
+) -> tuple[_T, ...]: ...
+def value_chain(*args: _T | Iterable[_T]) -> Iterable[_T]: ...
+def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ...
+def combination_index(
+ element: Iterable[_T], iterable: Iterable[_T]
+) -> int: ...
+def combination_with_replacement_index(
+ element: Iterable[_T], iterable: Iterable[_T]
+) -> int: ...
+def permutation_index(
+ element: Iterable[_T], iterable: Iterable[_T]
+) -> int: ...
+def repeat_each(iterable: Iterable[_T], n: int = ...) -> Iterator[_T]: ...
+
+class countable(Generic[_T], Iterator[_T]):
+ def __init__(self, iterable: Iterable[_T]) -> None: ...
+ def __iter__(self) -> countable[_T]: ...
+ def __next__(self) -> _T: ...
+
+def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ...
+def zip_broadcast(
+ *objects: _T | Iterable[_T],
+ scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ...,
+ strict: bool = ...,
+) -> Iterable[tuple[_T, ...]]: ...
+def unique_in_window(
+ iterable: Iterable[_T], n: int, key: Callable[[_T], _U] | None = ...
+) -> Iterator[_T]: ...
+def duplicates_everseen(
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> Iterator[_T]: ...
+def duplicates_justseen(
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> Iterator[_T]: ...
+def classify_unique(
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> Iterator[tuple[_T, bool, bool]]: ...
+
+class _SupportsLessThan(Protocol):
+ def __lt__(self, __other: Any) -> bool: ...
+
+_SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan)
+
+@overload
+def minmax(
+ iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None
+) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+@overload
+def minmax(
+ iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan]
+) -> tuple[_T, _T]: ...
+@overload
+def minmax(
+ iterable_or_value: Iterable[_SupportsLessThanT],
+ *,
+ key: None = None,
+ default: _U,
+) -> _U | tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+@overload
+def minmax(
+ iterable_or_value: Iterable[_T],
+ *,
+ key: Callable[[_T], _SupportsLessThan],
+ default: _U,
+) -> _U | tuple[_T, _T]: ...
+@overload
+def minmax(
+ iterable_or_value: _SupportsLessThanT,
+ __other: _SupportsLessThanT,
+ *others: _SupportsLessThanT,
+) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+@overload
+def minmax(
+ iterable_or_value: _T,
+ __other: _T,
+ *others: _T,
+ key: Callable[[_T], _SupportsLessThan],
+) -> tuple[_T, _T]: ...
+def longest_common_prefix(
+ iterables: Iterable[Iterable[_T]],
+) -> Iterator[_T]: ...
+def iequals(*iterables: Iterable[Any]) -> bool: ...
+def constrained_batches(
+ iterable: Iterable[_T],
+ max_size: int,
+ max_count: int | None = ...,
+ get_len: Callable[[_T], object] = ...,
+ strict: bool = ...,
+) -> Iterator[tuple[_T]]: ...
+def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
+def partial_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
+def takewhile_inclusive(
+ predicate: Callable[[_T], bool], iterable: Iterable[_T]
+) -> Iterator[_T]: ...
+def outer_product(
+ func: Callable[[_T, _U], _V],
+ xs: Iterable[_T],
+ ys: Iterable[_U],
+ *args: Any,
+ **kwargs: Any,
+) -> Iterator[tuple[_V, ...]]: ...
+def iter_suppress(
+ iterable: Iterable[_T],
+ *exceptions: Type[BaseException],
+) -> Iterator[_T]: ...
+def filter_map(
+ func: Callable[[_T], _V | None],
+ iterable: Iterable[_T],
+) -> Iterator[_V]: ...
diff --git a/venv/lib/python3.10/site-packages/more_itertools/py.typed b/venv/lib/python3.10/site-packages/more_itertools/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/more_itertools/recipes.py b/venv/lib/python3.10/site-packages/more_itertools/recipes.py
new file mode 100644
index 0000000000000000000000000000000000000000..145e3cb5bd6bd5b916e3544e4b042a7ed203621a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/more_itertools/recipes.py
@@ -0,0 +1,1012 @@
+"""Imported from the recipes section of the itertools documentation.
+
+All functions taken from the recipes section of the itertools library docs
+[1]_.
+Some backward-compatible usability improvements have been made.
+
+.. [1] http://docs.python.org/library/itertools.html#recipes
+
+"""
+import math
+import operator
+
+from collections import deque
+from collections.abc import Sized
+from functools import partial, reduce
+from itertools import (
+ chain,
+ combinations,
+ compress,
+ count,
+ cycle,
+ groupby,
+ islice,
+ product,
+ repeat,
+ starmap,
+ tee,
+ zip_longest,
+)
+from random import randrange, sample, choice
+from sys import hexversion
+
+__all__ = [
+ 'all_equal',
+ 'batched',
+ 'before_and_after',
+ 'consume',
+ 'convolve',
+ 'dotproduct',
+ 'first_true',
+ 'factor',
+ 'flatten',
+ 'grouper',
+ 'iter_except',
+ 'iter_index',
+ 'matmul',
+ 'ncycles',
+ 'nth',
+ 'nth_combination',
+ 'padnone',
+ 'pad_none',
+ 'pairwise',
+ 'partition',
+ 'polynomial_eval',
+ 'polynomial_from_roots',
+ 'polynomial_derivative',
+ 'powerset',
+ 'prepend',
+ 'quantify',
+ 'reshape',
+ 'random_combination_with_replacement',
+ 'random_combination',
+ 'random_permutation',
+ 'random_product',
+ 'repeatfunc',
+ 'roundrobin',
+ 'sieve',
+ 'sliding_window',
+ 'subslices',
+ 'sum_of_squares',
+ 'tabulate',
+ 'tail',
+ 'take',
+ 'totient',
+ 'transpose',
+ 'triplewise',
+ 'unique_everseen',
+ 'unique_justseen',
+]
+
+_marker = object()
+
+
+# zip with strict is available for Python 3.10+
+try:
+ zip(strict=True)
+except TypeError:
+ _zip_strict = zip
+else:
+ _zip_strict = partial(zip, strict=True)
+
+# math.sumprod is available for Python 3.12+
+_sumprod = getattr(math, 'sumprod', lambda x, y: dotproduct(x, y))
+
+
+def take(n, iterable):
+ """Return first *n* items of the iterable as a list.
+
+ >>> take(3, range(10))
+ [0, 1, 2]
+
+ If there are fewer than *n* items in the iterable, all of them are
+ returned.
+
+ >>> take(10, range(3))
+ [0, 1, 2]
+
+ """
+ return list(islice(iterable, n))
+
+
+def tabulate(function, start=0):
+ """Return an iterator over the results of ``func(start)``,
+ ``func(start + 1)``, ``func(start + 2)``...
+
+ *func* should be a function that accepts one integer argument.
+
+ If *start* is not specified it defaults to 0. It will be incremented each
+ time the iterator is advanced.
+
+ >>> square = lambda x: x ** 2
+ >>> iterator = tabulate(square, -3)
+ >>> take(4, iterator)
+ [9, 4, 1, 0]
+
+ """
+ return map(function, count(start))
+
+
+def tail(n, iterable):
+ """Return an iterator over the last *n* items of *iterable*.
+
+ >>> t = tail(3, 'ABCDEFG')
+ >>> list(t)
+ ['E', 'F', 'G']
+
+ """
+ # If the given iterable has a length, then we can use islice to get its
+ # final elements. Note that if the iterable is not actually Iterable,
+ # either islice or deque will throw a TypeError. This is why we don't
+ # check if it is Iterable.
+ if isinstance(iterable, Sized):
+ yield from islice(iterable, max(0, len(iterable) - n), None)
+ else:
+ yield from iter(deque(iterable, maxlen=n))
+
+
+def consume(iterator, n=None):
+ """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
+ entirely.
+
+ Efficiently exhausts an iterator without returning values. Defaults to
+ consuming the whole iterator, but an optional second argument may be
+ provided to limit consumption.
+
+ >>> i = (x for x in range(10))
+ >>> next(i)
+ 0
+ >>> consume(i, 3)
+ >>> next(i)
+ 4
+ >>> consume(i)
+ >>> next(i)
+ Traceback (most recent call last):
+ File "", line 1, in
+ StopIteration
+
+ If the iterator has fewer items remaining than the provided limit, the
+ whole iterator will be consumed.
+
+ >>> i = (x for x in range(3))
+ >>> consume(i, 5)
+ >>> next(i)
+ Traceback (most recent call last):
+ File "", line 1, in
+ StopIteration
+
+ """
+ # Use functions that consume iterators at C speed.
+ if n is None:
+ # feed the entire iterator into a zero-length deque
+ deque(iterator, maxlen=0)
+ else:
+ # advance to the empty slice starting at position n
+ next(islice(iterator, n, n), None)
+
+
+def nth(iterable, n, default=None):
+ """Returns the nth item or a default value.
+
+ >>> l = range(10)
+ >>> nth(l, 3)
+ 3
+ >>> nth(l, 20, "zebra")
+ 'zebra'
+
+ """
+ return next(islice(iterable, n, None), default)
+
+
+def all_equal(iterable):
+ """
+ Returns ``True`` if all the elements are equal to each other.
+
+ >>> all_equal('aaaa')
+ True
+ >>> all_equal('aaab')
+ False
+
+ """
+ g = groupby(iterable)
+ return next(g, True) and not next(g, False)
+
+
+def quantify(iterable, pred=bool):
+ """Return the how many times the predicate is true.
+
+ >>> quantify([True, False, True])
+ 2
+
+ """
+ return sum(map(pred, iterable))
+
+
+def pad_none(iterable):
+ """Returns the sequence of elements and then returns ``None`` indefinitely.
+
+ >>> take(5, pad_none(range(3)))
+ [0, 1, 2, None, None]
+
+ Useful for emulating the behavior of the built-in :func:`map` function.
+
+ See also :func:`padded`.
+
+ """
+ return chain(iterable, repeat(None))
+
+
+padnone = pad_none
+
+
+def ncycles(iterable, n):
+ """Returns the sequence elements *n* times
+
+ >>> list(ncycles(["a", "b"], 3))
+ ['a', 'b', 'a', 'b', 'a', 'b']
+
+ """
+ return chain.from_iterable(repeat(tuple(iterable), n))
+
+
+def dotproduct(vec1, vec2):
+ """Returns the dot product of the two iterables.
+
+ >>> dotproduct([10, 10], [20, 20])
+ 400
+
+ """
+ return sum(map(operator.mul, vec1, vec2))
+
+
+def flatten(listOfLists):
+ """Return an iterator flattening one level of nesting in a list of lists.
+
+ >>> list(flatten([[0, 1], [2, 3]]))
+ [0, 1, 2, 3]
+
+ See also :func:`collapse`, which can flatten multiple levels of nesting.
+
+ """
+ return chain.from_iterable(listOfLists)
+
+
+def repeatfunc(func, times=None, *args):
+ """Call *func* with *args* repeatedly, returning an iterable over the
+ results.
+
+ If *times* is specified, the iterable will terminate after that many
+ repetitions:
+
+ >>> from operator import add
+ >>> times = 4
+ >>> args = 3, 5
+ >>> list(repeatfunc(add, times, *args))
+ [8, 8, 8, 8]
+
+ If *times* is ``None`` the iterable will not terminate:
+
+ >>> from random import randrange
+ >>> times = None
+ >>> args = 1, 11
+ >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
+ [2, 4, 8, 1, 8, 4]
+
+ """
+ if times is None:
+ return starmap(func, repeat(args))
+ return starmap(func, repeat(args, times))
+
+
+def _pairwise(iterable):
+ """Returns an iterator of paired items, overlapping, from the original
+
+ >>> take(4, pairwise(count()))
+ [(0, 1), (1, 2), (2, 3), (3, 4)]
+
+ On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
+
+ """
+ a, b = tee(iterable)
+ next(b, None)
+ return zip(a, b)
+
+
+try:
+ from itertools import pairwise as itertools_pairwise
+except ImportError:
+ pairwise = _pairwise
+else:
+
+ def pairwise(iterable):
+ return itertools_pairwise(iterable)
+
+ pairwise.__doc__ = _pairwise.__doc__
+
+
+class UnequalIterablesError(ValueError):
+ def __init__(self, details=None):
+ msg = 'Iterables have different lengths'
+ if details is not None:
+ msg += (': index 0 has length {}; index {} has length {}').format(
+ *details
+ )
+
+ super().__init__(msg)
+
+
+def _zip_equal_generator(iterables):
+ for combo in zip_longest(*iterables, fillvalue=_marker):
+ for val in combo:
+ if val is _marker:
+ raise UnequalIterablesError()
+ yield combo
+
+
+def _zip_equal(*iterables):
+ # Check whether the iterables are all the same size.
+ try:
+ first_size = len(iterables[0])
+ for i, it in enumerate(iterables[1:], 1):
+ size = len(it)
+ if size != first_size:
+ raise UnequalIterablesError(details=(first_size, i, size))
+ # All sizes are equal, we can use the built-in zip.
+ return zip(*iterables)
+ # If any one of the iterables didn't have a length, start reading
+ # them until one runs out.
+ except TypeError:
+ return _zip_equal_generator(iterables)
+
+
+def grouper(iterable, n, incomplete='fill', fillvalue=None):
+ """Group elements from *iterable* into fixed-length groups of length *n*.
+
+ >>> list(grouper('ABCDEF', 3))
+ [('A', 'B', 'C'), ('D', 'E', 'F')]
+
+ The keyword arguments *incomplete* and *fillvalue* control what happens for
+ iterables whose length is not a multiple of *n*.
+
+ When *incomplete* is `'fill'`, the last group will contain instances of
+ *fillvalue*.
+
+ >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
+ [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
+
+ When *incomplete* is `'ignore'`, the last group will not be emitted.
+
+ >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
+ [('A', 'B', 'C'), ('D', 'E', 'F')]
+
+ When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
+
+ >>> it = grouper('ABCDEFG', 3, incomplete='strict')
+ >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ UnequalIterablesError
+
+ """
+ args = [iter(iterable)] * n
+ if incomplete == 'fill':
+ return zip_longest(*args, fillvalue=fillvalue)
+ if incomplete == 'strict':
+ return _zip_equal(*args)
+ if incomplete == 'ignore':
+ return zip(*args)
+ else:
+ raise ValueError('Expected fill, strict, or ignore')
+
+
+def roundrobin(*iterables):
+ """Yields an item from each iterable, alternating between them.
+
+ >>> list(roundrobin('ABC', 'D', 'EF'))
+ ['A', 'D', 'E', 'B', 'F', 'C']
+
+ This function produces the same output as :func:`interleave_longest`, but
+ may perform better for some inputs (in particular when the number of
+ iterables is small).
+
+ """
+ # Recipe credited to George Sakkis
+ pending = len(iterables)
+ nexts = cycle(iter(it).__next__ for it in iterables)
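+ # `nexts` cycles over each iterable's __next__; when one raises
+ # StopIteration, the cycle is rebuilt from the `pending` callables that
+ # remain.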
+ while pending:
+ try:
+ for next in nexts:
+ yield next()
+ except StopIteration:
+ pending -= 1
+ nexts = cycle(islice(nexts, pending))
+
+
+def partition(pred, iterable):
+ """
+ Returns a 2-tuple of iterables derived from the input iterable.
+ The first yields the items that have ``pred(item) == False``.
+ The second yields the items that have ``pred(item) == True``.
+
+ >>> is_odd = lambda x: x % 2 != 0
+ >>> iterable = range(10)
+ >>> even_items, odd_items = partition(is_odd, iterable)
+ >>> list(even_items), list(odd_items)
+ ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
+
+ If *pred* is None, :func:`bool` is used.
+
+ >>> iterable = [0, 1, False, True, '', ' ']
+ >>> false_items, true_items = partition(None, iterable)
+ >>> list(false_items), list(true_items)
+ ([0, False, ''], [1, True, ' '])
+
+ """
+ if pred is None:
+ pred = bool
+
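+ # Evaluate the predicate once per item (stream p), then use compress() to
+ # select the failing items from t1 and the passing items from t2.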
+ t1, t2, p = tee(iterable, 3)
+ p1, p2 = tee(map(pred, p))
+ return (compress(t1, map(operator.not_, p1)), compress(t2, p2))
+
+
+def powerset(iterable):
+ """Yields all possible subsets of the iterable.
+
+ >>> list(powerset([1, 2, 3]))
+ [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
+
+ :func:`powerset` will operate on iterables that aren't :class:`set`
+ instances, so repeated elements in the input will produce repeated elements
+ in the output. Use :func:`unique_everseen` on the input to avoid generating
+ duplicates:
+
+ >>> seq = [1, 1, 0]
+ >>> list(powerset(seq))
+ [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
+ >>> from more_itertools import unique_everseen
+ >>> list(powerset(unique_everseen(seq)))
+ [(), (1,), (0,), (1, 0)]
+
+ """
+ s = list(iterable)
+ return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
+
+
+def unique_everseen(iterable, key=None):
+ """
+ Yield unique elements, preserving order.
+
+ >>> list(unique_everseen('AAAABBBCCDAABBB'))
+ ['A', 'B', 'C', 'D']
+ >>> list(unique_everseen('ABBCcAD', str.lower))
+ ['A', 'B', 'C', 'D']
+
+ Sequences with a mix of hashable and unhashable items can be used.
+ The function will be slower (i.e., `O(n^2)`) for unhashable items.
+
+ Remember that ``list`` objects are unhashable - you can use the *key*
+ parameter to transform the list to a tuple (which is hashable) to
+ avoid a slowdown.
+
+ >>> iterable = ([1, 2], [2, 3], [1, 2])
+ >>> list(unique_everseen(iterable)) # Slow
+ [[1, 2], [2, 3]]
+ >>> list(unique_everseen(iterable, key=tuple)) # Faster
+ [[1, 2], [2, 3]]
+
+ Similarly, you may want to convert unhashable ``set`` objects with
+ ``key=frozenset``. For ``dict`` objects,
+ ``key=lambda x: frozenset(x.items())`` can be used.
+
+ """
+ seenset = set()
+ seenset_add = seenset.add
+ seenlist = []
+ seenlist_add = seenlist.append
+ use_key = key is not None
+
+ for element in iterable:
+ k = key(element) if use_key else element
+ try:
+ if k not in seenset:
+ seenset_add(k)
+ yield element
+ except TypeError:
+ if k not in seenlist:
+ seenlist_add(k)
+ yield element
+
+
+def unique_justseen(iterable, key=None):
+ """Yields elements in order, ignoring serial duplicates
+
+ >>> list(unique_justseen('AAAABBBCCDAABBB'))
+ ['A', 'B', 'C', 'D', 'A', 'B']
+ >>> list(unique_justseen('ABBCcAD', str.lower))
+ ['A', 'B', 'C', 'A', 'D']
+
+ """
+ if key is None:
+ return map(operator.itemgetter(0), groupby(iterable))
+
+ return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
+
+
+def iter_except(func, exception, first=None):
+ """Yields results from a function repeatedly until an exception is raised.
+
+ Converts a call-until-exception interface to an iterator interface.
+ Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
+ to end the loop.
+
+ >>> l = [0, 1, 2]
+ >>> list(iter_except(l.pop, IndexError))
+ [2, 1, 0]
+
+ Multiple exceptions can be specified as a stopping condition:
+
+ >>> l = [1, 2, 3, '...', 4, 5, 6]
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+ [7, 6, 5]
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+ [4, 3, 2]
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+ []
+
+ """
+ try:
+ if first is not None:
+ yield first()
+ while 1:
+ yield func()
+ except exception:
+ pass
+
+
+def first_true(iterable, default=None, pred=None):
+ """
+ Returns the first true value in the iterable.
+
+ If no true value is found, returns *default*
+
+ If *pred* is not None, returns the first item for which
+ ``pred(item) == True`` .
+
+ >>> first_true(range(10))
+ 1
+ >>> first_true(range(10), pred=lambda x: x > 5)
+ 6
+ >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
+ 'missing'
+
+ """
+ return next(filter(pred, iterable), default)
+
+
+def random_product(*args, repeat=1):
+ """Draw an item at random from each of the input iterables.
+
+ >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
+ ('c', 3, 'Z')
+
+ If *repeat* is provided as a keyword argument, that many items will be
+ drawn from each iterable.
+
+ >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
+ ('a', 2, 'd', 3)
+
+ This is equivalent to taking a random selection from
+ ``itertools.product(*args, repeat=repeat)``.
+
+ """
+ pools = [tuple(pool) for pool in args] * repeat
+ return tuple(choice(pool) for pool in pools)
+
+
+def random_permutation(iterable, r=None):
+ """Return a random *r* length permutation of the elements in *iterable*.
+
+ If *r* is not specified or is ``None``, then *r* defaults to the length of
+ *iterable*.
+
+ >>> random_permutation(range(5)) # doctest:+SKIP
+ (3, 4, 0, 1, 2)
+
+ This is equivalent to taking a random selection from
+ ``itertools.permutations(iterable, r)``.
+
+ """
+ pool = tuple(iterable)
+ r = len(pool) if r is None else r
+ return tuple(sample(pool, r))
+
+
+def random_combination(iterable, r):
+ """Return a random *r* length subsequence of the elements in *iterable*.
+
+ >>> random_combination(range(5), 3) # doctest:+SKIP
+ (2, 3, 4)
+
+ This is equivalent to taking a random selection from
+ ``itertools.combinations(iterable, r)``.
+
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ indices = sorted(sample(range(n), r))
+ return tuple(pool[i] for i in indices)
+
+
+def random_combination_with_replacement(iterable, r):
+ """Return a random *r* length subsequence of elements in *iterable*,
+ allowing individual elements to be repeated.
+
+ >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
+ (0, 0, 1, 2, 2)
+
+ This is equivalent to taking a random selection from
+ ``itertools.combinations_with_replacement(iterable, r)``.
+
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ indices = sorted(randrange(n) for i in range(r))
+ return tuple(pool[i] for i in indices)
+
+
+def nth_combination(iterable, r, index):
+ """Equivalent to ``list(combinations(iterable, r))[index]``.
+
+ The subsequences of *iterable* that are of length *r* can be ordered
+ lexicographically. :func:`nth_combination` computes the subsequence at
+ sort position *index* directly, without computing the previous
+ subsequences.
+
+ >>> nth_combination(range(5), 3, 5)
+ (0, 3, 4)
+
+ ``ValueError`` will be raised if *r* is negative or greater than the length
+ of *iterable*.
+ ``IndexError`` will be raised if the given *index* is invalid.
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ if (r < 0) or (r > n):
+ raise ValueError
+
+ c = 1
+ k = min(r, n - r)
+ for i in range(1, k + 1):
+ c = c * (n - k + i) // i
+
+ if index < 0:
+ index += c
+
+ if (index < 0) or (index >= c):
+ raise IndexError
+
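+ # Unranking: at each step c is the number of combinations that include the
+ # next candidate element; if index >= c, that element is skipped and index
+ # is reduced accordingly.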
+ result = []
+ while r:
+ c, n, r = c * r // n, n - 1, r - 1
+ while index >= c:
+ index -= c
+ c, n = c * (n - r) // n, n - 1
+ result.append(pool[-1 - n])
+
+ return tuple(result)
+
+
+def prepend(value, iterator):
+ """Yield *value*, followed by the elements in *iterator*.
+
+ >>> value = '0'
+ >>> iterator = ['1', '2', '3']
+ >>> list(prepend(value, iterator))
+ ['0', '1', '2', '3']
+
+ To prepend multiple values, see :func:`itertools.chain`
+ or :func:`value_chain`.
+
+ """
+ return chain([value], iterator)
+
+
+def convolve(signal, kernel):
+ """Convolve the iterable *signal* with the iterable *kernel*.
+
+ >>> signal = (1, 2, 3, 4, 5)
+ >>> kernel = [3, 2, 1]
+ >>> list(convolve(signal, kernel))
+ [3, 8, 14, 20, 26, 14, 5]
+
+ Note: the input arguments are not interchangeable, as the *kernel*
+ is immediately consumed and stored.
+
+ """
+ # This implementation intentionally doesn't match the one in the itertools
+ # documentation.
+ kernel = tuple(kernel)[::-1]
+ n = len(kernel)
+ window = deque([0], maxlen=n) * n
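+ # The window starts as n zeros; appending each signal value (plus n - 1
+ # trailing zeros to flush the tail) and dot-multiplying with the reversed
+ # kernel produces the full convolution.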
+ for x in chain(signal, repeat(0, n - 1)):
+ window.append(x)
+ yield _sumprod(kernel, window)
+
+
+def before_and_after(predicate, it):
+ """A variant of :func:`takewhile` that allows complete access to the
+ remainder of the iterator.
+
+ >>> it = iter('ABCdEfGhI')
+ >>> all_upper, remainder = before_and_after(str.isupper, it)
+ >>> ''.join(all_upper)
+ 'ABC'
+ >>> ''.join(remainder) # takewhile() would lose the 'd'
+ 'dEfGhI'
+
+ Note that the first iterator must be fully consumed before the second
+ iterator can generate valid results.
+ """
+ it = iter(it)
+ transition = []
+
+ def true_iterator():
+ for elem in it:
+ if predicate(elem):
+ yield elem
+ else:
+ transition.append(elem)
+ return
+
+ # Note: this is different from itertools recipes to allow nesting
+ # before_and_after remainders into before_and_after again. See tests
+ # for an example.
+ remainder_iterator = chain(transition, it)
+
+ return true_iterator(), remainder_iterator
+
+
+def triplewise(iterable):
+ """Return overlapping triplets from *iterable*.
+
+ >>> list(triplewise('ABCDE'))
+ [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
+
+ """
+ for (a, _), (b, c) in pairwise(pairwise(iterable)):
+ yield a, b, c
+
+
+def sliding_window(iterable, n):
+ """Return a sliding window of width *n* over *iterable*.
+
+ >>> list(sliding_window(range(6), 4))
+ [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
+
+ If *iterable* has fewer than *n* items, then nothing is yielded:
+
+ >>> list(sliding_window(range(3), 4))
+ []
+
+ For a variant with more features, see :func:`windowed`.
+ """
+ it = iter(iterable)
+ window = deque(islice(it, n - 1), maxlen=n)
+ for x in it:
+ window.append(x)
+ yield tuple(window)
+
+
+def subslices(iterable):
+ """Return all contiguous non-empty subslices of *iterable*.
+
+ >>> list(subslices('ABC'))
+ [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]
+
+ This is similar to :func:`substrings`, but emits items in a different
+ order.
+ """
+ seq = list(iterable)
+ slices = starmap(slice, combinations(range(len(seq) + 1), 2))
+ return map(operator.getitem, repeat(seq), slices)
+
+
+def polynomial_from_roots(roots):
+ """Compute a polynomial's coefficients from its roots.
+
+ >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
+ >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
+ [1, -4, -17, 60]
+ """
+ factors = zip(repeat(1), map(operator.neg, roots))
+ return list(reduce(convolve, factors, [1]))
+
+
+def iter_index(iterable, value, start=0, stop=None):
+ """Yield the index of each place in *iterable* that *value* occurs,
+ beginning with index *start* and ending before index *stop*.
+
+ See :func:`locate` for a more general means of finding the indexes
+ associated with particular values.
+
+ >>> list(iter_index('AABCADEAF', 'A'))
+ [0, 1, 4, 7]
+ >>> list(iter_index('AABCADEAF', 'A', 1)) # start index is inclusive
+ [1, 4, 7]
+ >>> list(iter_index('AABCADEAF', 'A', 1, 7)) # stop index is not inclusive
+ [1, 4]
+ """
+ seq_index = getattr(iterable, 'index', None)
+ if seq_index is None:
+ # Slow path for general iterables
+ it = islice(iterable, start, stop)
+ for i, element in enumerate(it, start):
+ if element is value or element == value:
+ yield i
+ else:
+ # Fast path for sequences
+ stop = len(iterable) if stop is None else stop
+ i = start - 1
+ try:
+ while True:
+ yield (i := seq_index(value, i + 1, stop))
+ except ValueError:
+ pass
+
+
+def sieve(n):
+ """Yield the primes less than n.
+
+ >>> list(sieve(30))
+ [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
+ """
+ if n > 2:
+ yield 2
+ start = 3
+ data = bytearray((0, 1)) * (n // 2)
+ limit = math.isqrt(n) + 1
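+ # data[i] is 1 for odd i that have not been crossed off. Each prime p found
+ # below sqrt(n) clears its odd multiples starting at p * p, and candidates
+ # skipped over in the process are yielded as primes.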
+ for p in iter_index(data, 1, start, limit):
+ yield from iter_index(data, 1, start, p * p)
+ data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
+ start = p * p
+ yield from iter_index(data, 1, start)
+
+
+def _batched(iterable, n, *, strict=False):
+ """Batch data into tuples of length *n*. If the number of items in
+ *iterable* is not divisible by *n*:
+ * The last batch will be shorter if *strict* is ``False``.
+ * :exc:`ValueError` will be raised if *strict* is ``True``.
+
+ >>> list(batched('ABCDEFG', 3))
+ [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]
+
+ On Python 3.13 and above, this is an alias for :func:`itertools.batched`.
+ """
+ if n < 1:
+ raise ValueError('n must be at least one')
+ it = iter(iterable)
+ while batch := tuple(islice(it, n)):
+ if strict and len(batch) != n:
+ raise ValueError('batched(): incomplete batch')
+ yield batch
+
+
+if hexversion >= 0x30D00A2:
+ from itertools import batched as itertools_batched
+
+ def batched(iterable, n, *, strict=False):
+ return itertools_batched(iterable, n, strict=strict)
+
+else:
+ batched = _batched
+
+ batched.__doc__ = _batched.__doc__
+
+
+def transpose(it):
+ """Swap the rows and columns of the input matrix.
+
+ >>> list(transpose([(1, 2, 3), (11, 22, 33)]))
+ [(1, 11), (2, 22), (3, 33)]
+
+ The caller should ensure that the dimensions of the input are compatible.
+ If the input is empty, no output will be produced.
+ """
+ return _zip_strict(*it)
+
+
+def reshape(matrix, cols):
+ """Reshape the 2-D input *matrix* to have a column count given by *cols*.
+
+ >>> matrix = [(0, 1), (2, 3), (4, 5)]
+ >>> cols = 3
+ >>> list(reshape(matrix, cols))
+ [(0, 1, 2), (3, 4, 5)]
+ """
+ return batched(chain.from_iterable(matrix), cols)
+
+
+def matmul(m1, m2):
+ """Multiply two matrices.
+
+ >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
+ [(49, 80), (41, 60)]
+
+ The caller should ensure that the dimensions of the input matrices are
+ compatible with each other.
+ """
+ n = len(m2[0])
+ return batched(starmap(_sumprod, product(m1, transpose(m2))), n)
+
+
+def factor(n):
+ """Yield the prime factors of n.
+
+ >>> list(factor(360))
+ [2, 2, 2, 3, 3, 5]
+ """
+ for prime in sieve(math.isqrt(n) + 1):
+ while not n % prime:
+ yield prime
+ n //= prime
+ if n == 1:
+ return
+ if n > 1:
+ yield n
+
+
+def polynomial_eval(coefficients, x):
+ """Evaluate a polynomial at a specific value.
+
+ Example: evaluating x^3 - 4 * x^2 - 17 * x + 60 at x = 2.5:
+
+ >>> coefficients = [1, -4, -17, 60]
+ >>> x = 2.5
+ >>> polynomial_eval(coefficients, x)
+ 8.125
+ """
+ n = len(coefficients)
+ if n == 0:
+ return x * 0 # coerce zero to the type of x
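+ # Dot product of the coefficients with descending powers x^(n-1) ... x^0.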
+ powers = map(pow, repeat(x), reversed(range(n)))
+ return _sumprod(coefficients, powers)
+
+
+def sum_of_squares(it):
+ """Return the sum of the squares of the input values.
+
+ >>> sum_of_squares([10, 20, 30])
+ 1400
+ """
+ return _sumprod(*tee(it))
+
+
+def polynomial_derivative(coefficients):
+ """Compute the first derivative of a polynomial.
+
+ Example: evaluating the derivative of x^3 - 4 * x^2 - 17 * x + 60
+
+ >>> coefficients = [1, -4, -17, 60]
+ >>> derivative_coefficients = polynomial_derivative(coefficients)
+ >>> derivative_coefficients
+ [3, -8, -17]
+ """
+ n = len(coefficients)
+ powers = reversed(range(1, n))
+ return list(map(operator.mul, coefficients, powers))
+
+
+def totient(n):
+ """Return the count of natural numbers up to *n* that are coprime with *n*.
+
+ >>> totient(9)
+ 6
+ >>> totient(12)
+ 4
+ """
+ for p in unique_justseen(factor(n)):
+ n = n // p * (p - 1)
+
+ return n
diff --git a/venv/lib/python3.10/site-packages/more_itertools/recipes.pyi b/venv/lib/python3.10/site-packages/more_itertools/recipes.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..ed4c19db49b6d9bd4905bcf4242f677987fc5a27
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/more_itertools/recipes.pyi
@@ -0,0 +1,128 @@
+"""Stubs for more_itertools.recipes"""
+from __future__ import annotations
+
+from typing import (
+ Any,
+ Callable,
+ Iterable,
+ Iterator,
+ overload,
+ Sequence,
+ Type,
+ TypeVar,
+)
+
+# Type and type variable definitions
+_T = TypeVar('_T')
+_T1 = TypeVar('_T1')
+_T2 = TypeVar('_T2')
+_U = TypeVar('_U')
+
+def take(n: int, iterable: Iterable[_T]) -> list[_T]: ...
+def tabulate(
+ function: Callable[[int], _T], start: int = ...
+) -> Iterator[_T]: ...
+def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ...
+def consume(iterator: Iterable[_T], n: int | None = ...) -> None: ...
+@overload
+def nth(iterable: Iterable[_T], n: int) -> _T | None: ...
+@overload
+def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
+def all_equal(iterable: Iterable[_T]) -> bool: ...
+def quantify(
+ iterable: Iterable[_T], pred: Callable[[_T], bool] = ...
+) -> int: ...
+def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
+def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
+def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ...
+def dotproduct(vec1: Iterable[_T1], vec2: Iterable[_T2]) -> Any: ...
+def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ...
+def repeatfunc(
+ func: Callable[..., _U], times: int | None = ..., *args: Any
+) -> Iterator[_U]: ...
+def pairwise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T]]: ...
+def grouper(
+ iterable: Iterable[_T],
+ n: int,
+ incomplete: str = ...,
+ fillvalue: _U = ...,
+) -> Iterator[tuple[_T | _U, ...]]: ...
+def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ...
+def partition(
+ pred: Callable[[_T], object] | None, iterable: Iterable[_T]
+) -> tuple[Iterator[_T], Iterator[_T]]: ...
+def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
+def unique_everseen(
+ iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> Iterator[_T]: ...
+def unique_justseen(
+ iterable: Iterable[_T], key: Callable[[_T], object] | None = ...
+) -> Iterator[_T]: ...
+@overload
+def iter_except(
+ func: Callable[[], _T],
+ exception: Type[BaseException] | tuple[Type[BaseException], ...],
+ first: None = ...,
+) -> Iterator[_T]: ...
+@overload
+def iter_except(
+ func: Callable[[], _T],
+ exception: Type[BaseException] | tuple[Type[BaseException], ...],
+ first: Callable[[], _U],
+) -> Iterator[_T | _U]: ...
+@overload
+def first_true(
+ iterable: Iterable[_T], *, pred: Callable[[_T], object] | None = ...
+) -> _T | None: ...
+@overload
+def first_true(
+ iterable: Iterable[_T],
+ default: _U,
+ pred: Callable[[_T], object] | None = ...,
+) -> _T | _U: ...
+def random_product(
+ *args: Iterable[_T], repeat: int = ...
+) -> tuple[_T, ...]: ...
+def random_permutation(
+ iterable: Iterable[_T], r: int | None = ...
+) -> tuple[_T, ...]: ...
+def random_combination(iterable: Iterable[_T], r: int) -> tuple[_T, ...]: ...
+def random_combination_with_replacement(
+ iterable: Iterable[_T], r: int
+) -> tuple[_T, ...]: ...
+def nth_combination(
+ iterable: Iterable[_T], r: int, index: int
+) -> tuple[_T, ...]: ...
+def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[_T | _U]: ...
+def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ...
+def before_and_after(
+ predicate: Callable[[_T], bool], it: Iterable[_T]
+) -> tuple[Iterator[_T], Iterator[_T]]: ...
+def triplewise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]: ...
+def sliding_window(
+ iterable: Iterable[_T], n: int
+) -> Iterator[tuple[_T, ...]]: ...
+def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ...
+def polynomial_from_roots(roots: Sequence[_T]) -> list[_T]: ...
+def iter_index(
+ iterable: Iterable[_T],
+ value: Any,
+ start: int | None = ...,
+ stop: int | None = ...,
+) -> Iterator[int]: ...
+def sieve(n: int) -> Iterator[int]: ...
+def batched(
+ iterable: Iterable[_T], n: int, *, strict: bool = False
+) -> Iterator[tuple[_T]]: ...
+def transpose(
+ it: Iterable[Iterable[_T]],
+) -> Iterator[tuple[_T, ...]]: ...
+def reshape(
+ matrix: Iterable[Iterable[_T]], cols: int
+) -> Iterator[tuple[_T, ...]]: ...
+def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[tuple[_T]]: ...
+def factor(n: int) -> Iterator[int]: ...
+def polynomial_eval(coefficients: Sequence[_T], x: _U) -> _U: ...
+def sum_of_squares(it: Iterable[_T]) -> _T: ...
+def polynomial_derivative(coefficients: Sequence[_T]) -> list[_T]: ...
+def totient(n: int) -> int: ...
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/License.txt b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/License.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/License.txt
@@ -0,0 +1,1568 @@
+End User License Agreement
+--------------------------
+
+
+Preface
+-------
+
+The Software License Agreement in Chapter 1 and the Supplement
+in Chapter 2 contain license terms and conditions that govern
+the use of NVIDIA software. By accepting this agreement, you
+agree to comply with all the terms and conditions applicable
+to the product(s) included herein.
+
+
+NVIDIA Driver
+
+
+Description
+
+This package contains the operating system driver and
+fundamental system software components for NVIDIA GPUs.
+
+
+NVIDIA CUDA Toolkit
+
+
+Description
+
+The NVIDIA CUDA Toolkit provides command-line and graphical
+tools for building, debugging and optimizing the performance
+of applications accelerated by NVIDIA GPUs, runtime and math
+libraries, and documentation including programming guides,
+user manuals, and API references.
+
+
+Default Install Location of CUDA Toolkit
+
+Windows platform:
+
+%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#
+
+
+NVIDIA CUDA Samples
+
+
+Description
+
+This package includes more than 100 CUDA examples that
+demonstrate various CUDA programming principles and efficient
+CUDA implementations of algorithms in specific application domains.
+
+
+Default Install Location of CUDA Samples
+
+Windows platform:
+
+%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#/samples
+
+and
+
+$HOME/NVIDIA_CUDA-#.#_Samples
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#/samples
+
+
+NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+Description
+
+NVIDIA Nsight Development Platform, Visual Studio Edition is a
+development environment integrated into Microsoft Visual
+Studio that provides tools for debugging, profiling, analyzing
+and optimizing your GPU computing and graphics applications.
+
+
+Default Install Location of Nsight Visual Studio Edition
+
+Windows platform:
+
+%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+1. License Agreement for NVIDIA Software Development Kits
+---------------------------------------------------------
+
+
+Release Date: July 26, 2018
+---------------------------
+
+
+Important Notice: Read before downloading, installing,
+copying or using the licensed software:
+-------------------------------------------------------
+
+This license agreement, including exhibits attached
+("Agreement”) is a legal agreement between you and NVIDIA
+Corporation ("NVIDIA") and governs your use of a NVIDIA
+software development kit (“SDK”).
+
+Each SDK has its own set of software and materials, but here
+is a description of the types of items that may be included in
+a SDK: source code, header files, APIs, data sets and assets
+(examples include images, textures, models, scenes, videos,
+native API input/output files), binary software, sample code,
+libraries, utility programs, programming code and
+documentation.
+
+This Agreement can be accepted only by an adult of legal age
+of majority in the country in which the SDK is used.
+
+If you are entering into this Agreement on behalf of a company
+or other legal entity, you represent that you have the legal
+authority to bind the entity to this Agreement, in which case
+“you” will mean the entity you represent.
+
+If you don’t have the required age or authority to accept
+this Agreement, or if you don’t accept all the terms and
+conditions of this Agreement, do not download, install or use
+the SDK.
+
+You agree to use the SDK only for purposes that are permitted
+by (a) this Agreement, and (b) any applicable law, regulation
+or generally accepted practices or guidelines in the relevant
+jurisdictions.
+
+
+1.1. License
+
+
+1.1.1. License Grant
+
+Subject to the terms of this Agreement, NVIDIA hereby grants
+you a non-exclusive, non-transferable license, without the
+right to sublicense (except as expressly provided in this
+Agreement) to:
+
+ 1. Install and use the SDK,
+
+ 2. Modify and create derivative works of sample source code
+ delivered in the SDK, and
+
+ 3. Distribute those portions of the SDK that are identified
+ in this Agreement as distributable, as incorporated in
+ object code format into a software application that meets
+ the distribution requirements indicated in this Agreement.
+
+
+1.1.2. Distribution Requirements
+
+These are the distribution requirements for you to exercise
+the distribution grant:
+
+ 1. Your application must have material additional
+ functionality, beyond the included portions of the SDK.
+
+ 2. The distributable portions of the SDK shall only be
+ accessed by your application.
+
+ 3. The following notice shall be included in modifications
+ and derivative works of sample source code distributed:
+ “This software contains source code provided by NVIDIA
+ Corporation.”
+
+ 4. Unless a developer tool is identified in this Agreement
+ as distributable, it is delivered for your internal use
+ only.
+
+ 5. The terms under which you distribute your application
+ must be consistent with the terms of this Agreement,
+ including (without limitation) terms relating to the
+ license grant and license restrictions and protection of
+ NVIDIA’s intellectual property rights. Additionally, you
+ agree that you will protect the privacy, security and
+ legal rights of your application users.
+
+ 6. You agree to notify NVIDIA in writing of any known or
+ suspected distribution or use of the SDK not in compliance
+ with the requirements of this Agreement, and to enforce
+ the terms of your agreements with respect to distributed
+ SDK.
+
+
+1.1.3. Authorized Users
+
+You may allow employees and contractors of your entity or of
+your subsidiary(ies) to access and use the SDK from your
+secure network to perform work on your behalf.
+
+If you are an academic institution you may allow users
+enrolled or employed by the academic institution to access and
+use the SDK from your secure network.
+
+You are responsible for the compliance with the terms of this
+Agreement by your authorized users. If you become aware that
+your authorized users didn’t follow the terms of this
+Agreement, you agree to take reasonable steps to resolve the
+non-compliance and prevent new occurrences.
+
+
+1.1.4. Pre-Release SDK
+
+The SDK versions identified as alpha, beta, preview or
+otherwise as pre-release, may not be fully functional, may
+contain errors or design flaws, and may have reduced or
+different security, privacy, accessibility, availability, and
+reliability standards relative to commercial versions of
+NVIDIA software and materials. Use of a pre-release SDK may
+result in unexpected results, loss of data, project delays or
+other unpredictable damage or loss.
+
+You may use a pre-release SDK at your own risk, understanding
+that pre-release SDKs are not intended for use in production
+or business-critical systems.
+
+NVIDIA may choose not to make available a commercial version
+of any pre-release SDK. NVIDIA may also choose to abandon
+development and terminate the availability of a pre-release
+SDK at any time without liability.
+
+
+1.1.5. Updates
+
+NVIDIA may, at its option, make available patches, workarounds
+or other updates to this SDK. Unless the updates are provided
+with their separate governing terms, they are deemed part of
+the SDK licensed to you as provided in this Agreement. You
+agree that the form and content of the SDK that NVIDIA
+provides may change without prior notice to you. While NVIDIA
+generally maintains compatibility between versions, NVIDIA may
+in some cases make changes that introduce incompatibilities in
+future versions of the SDK.
+
+
+1.1.6. Third Party Licenses
+
+The SDK may come bundled with, or otherwise include or be
+distributed with, third party software licensed by a NVIDIA
+supplier and/or open source software provided under an open
+source license. Use of third party software is subject to the
+third-party license terms, or in the absence of third party
+terms, the terms of this Agreement. Copyright to third party
+software is held by the copyright holders indicated in the
+third-party software or license.
+
+
+1.1.7. Reservation of Rights
+
+NVIDIA reserves all rights, title, and interest in and to the
+SDK, not expressly granted to you under this Agreement.
+
+
+1.2. Limitations
+
+The following license limitations apply to your use of the
+SDK:
+
+ 1. You may not reverse engineer, decompile or disassemble,
+ or remove copyright or other proprietary notices from any
+ portion of the SDK or copies of the SDK.
+
+ 2. Except as expressly provided in this Agreement, you may
+ not copy, sell, rent, sublicense, transfer, distribute,
+ modify, or create derivative works of any portion of the
+ SDK. For clarity, you may not distribute or sublicense the
+ SDK as a stand-alone product.
+
+ 3. Unless you have an agreement with NVIDIA for this
+ purpose, you may not indicate that an application created
+ with the SDK is sponsored or endorsed by NVIDIA.
+
+ 4. You may not bypass, disable, or circumvent any
+ encryption, security, digital rights management or
+ authentication mechanism in the SDK.
+
+ 5. You may not use the SDK in any manner that would cause it
+ to become subject to an open source software license. As
+ examples, licenses that require as a condition of use,
+ modification, and/or distribution that the SDK be:
+
+ a. Disclosed or distributed in source code form;
+
+ b. Licensed for the purpose of making derivative works;
+ or
+
+ c. Redistributable at no charge.
+
+ 6. Unless you have an agreement with NVIDIA for this
+ purpose, you may not use the SDK with any system or
+ application where the use or failure of the system or
+ application can reasonably be expected to threaten or
+ result in personal injury, death, or catastrophic loss.
+ Examples include use in avionics, navigation, military,
+ medical, life support or other life critical applications.
+ NVIDIA does not design, test or manufacture the SDK for
+ these critical uses and NVIDIA shall not be liable to you
+ or any third party, in whole or in part, for any claims or
+ damages arising from such uses.
+
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
+ and its affiliates, and their respective employees,
+ contractors, agents, officers and directors, from and
+ against any and all claims, damages, obligations, losses,
+ liabilities, costs or debt, fines, restitutions and
+ expenses (including but not limited to attorney’s fees
+ and costs incident to establishing the right of
+ indemnification) arising out of or related to your use of
+ the SDK outside of the scope of this Agreement, or not in
+ compliance with its terms.
+
+
+1.3. Ownership
+
+ 1. NVIDIA or its licensors hold all rights, title and
+ interest in and to the SDK and its modifications and
+ derivative works, including their respective intellectual
+ property rights, subject to your rights described in this
+ section. This SDK may include software and materials from
+ NVIDIA’s licensors, and these licensors are intended
+ third party beneficiaries that may enforce this Agreement
+ with respect to their intellectual property rights.
+
+ 2. You hold all rights, title and interest in and to your
+ applications and your derivative works of the sample
+ source code delivered in the SDK, including their
+ respective intellectual property rights, subject to
+ NVIDIA’s rights described in this section.
+
+ 3. You may, but don’t have to, provide to NVIDIA
+ suggestions, feature requests or other feedback regarding
+ the SDK, including possible enhancements or modifications
+ to the SDK. For any feedback that you voluntarily provide,
+ you hereby grant NVIDIA and its affiliates a perpetual,
+ non-exclusive, worldwide, irrevocable license to use,
+ reproduce, modify, license, sublicense (through multiple
+ tiers of sublicensees), and distribute (through multiple
+ tiers of distributors) it without the payment of any
+ royalties or fees to you. NVIDIA will use feedback at its
+ choice. NVIDIA is constantly looking for ways to improve
+ its products, so you may send feedback to NVIDIA through
+ the developer portal at https://developer.nvidia.com.
+
+
+1.4. No Warranties
+
+THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
+FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
+ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
+OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
+BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
+ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
+WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
+DEALING OR COURSE OF TRADE.
+
+
+1.5. Limitation of Liability
+
+TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
+AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
+OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
+PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
+WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
+WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
+OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
+PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
+LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
+TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
+AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
+NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
+LIMIT.
+
+These exclusions and limitations of liability shall apply
+regardless if NVIDIA or its affiliates have been advised of
+the possibility of such damages, and regardless of whether a
+remedy fails its essential purpose. These exclusions and
+limitations of liability form an essential basis of the
+bargain between the parties, and, absent any of these
+exclusions or limitations of liability, the provisions of this
+Agreement, including, without limitation, the economic terms,
+would be substantially different.
+
+
+1.6. Termination
+
+ 1. This Agreement will continue to apply until terminated by
+ either you or NVIDIA as described below.
+
+ 2. If you want to terminate this Agreement, you may do so by
+ ceasing to use the SDK.
+
+ 3. NVIDIA may, at any time, terminate this Agreement if:
+
+ a. (i) you fail to comply with any term of this
+ Agreement and the non-compliance is not fixed within
+ thirty (30) days following notice from NVIDIA (or
+ immediately if you violate NVIDIA’s intellectual
+ property rights);
+
+ b. (ii) you commence or participate in any legal
+ proceeding against NVIDIA with respect to the SDK; or
+
+ c. (iii) NVIDIA decides to no longer provide the SDK in
+ a country or, in NVIDIA’s sole discretion, the
+ continued use of it is no longer commercially viable.
+
+ 4. Upon any termination of this Agreement, you agree to
+ promptly discontinue use of the SDK and destroy all copies
+ in your possession or control. Your prior distributions in
+ accordance with this Agreement are not affected by the
+ termination of this Agreement. Upon written request, you
+ will certify in writing that you have complied with your
+ commitments under this section. Upon any termination of
+ this Agreement all provisions survive except for the
+ license grant provisions.
+
+
+1.7. General
+
+If you wish to assign this Agreement or your rights and
+obligations, including by merger, consolidation, dissolution
+or operation of law, contact NVIDIA to ask for permission. Any
+attempted assignment not approved by NVIDIA in writing shall
+be void and of no effect. NVIDIA may assign, delegate or
+transfer this Agreement and its rights and obligations, and if
+to a non-affiliate you will be notified.
+
+You agree to cooperate with NVIDIA and provide reasonably
+requested information to verify your compliance with this
+Agreement.
+
+This Agreement will be governed in all respects by the laws of
+the United States and of the State of Delaware as those laws
+are applied to contracts entered into and performed entirely
+within Delaware by Delaware residents, without regard to the
+conflicts of laws principles. The United Nations Convention on
+Contracts for the International Sale of Goods is specifically
+disclaimed. You agree to all terms of this Agreement in the
+English language.
+
+The state or federal courts residing in Santa Clara County,
+California shall have exclusive jurisdiction over any dispute
+or claim arising out of this Agreement. Notwithstanding this,
+you agree that NVIDIA shall still be allowed to apply for
+injunctive remedies or an equivalent type of urgent legal
+relief in any jurisdiction.
+
+If any court of competent jurisdiction determines that any
+provision of this Agreement is illegal, invalid or
+unenforceable, such provision will be construed as limited to
+the extent necessary to be consistent with and fully
+enforceable under the law and the remaining provisions will
+remain in full force and effect. Unless otherwise specified,
+remedies are cumulative.
+
+Each party acknowledges and agrees that the other is an
+independent contractor in the performance of this Agreement.
+
+The SDK has been developed entirely at private expense and is
+“commercial items” consisting of “commercial computer
+software” and “commercial computer software
+documentation” provided with RESTRICTED RIGHTS. Use,
+duplication or disclosure by the U.S. Government or a U.S.
+Government subcontractor is subject to the restrictions in
+this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
+in subparagraphs (c)(1) and (2) of the Commercial Computer
+Software - Restricted Rights clause at FAR 52.227-19, as
+applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
+Expressway, Santa Clara, CA 95051.
+
+The SDK is subject to United States export laws and
+regulations. You agree that you will not ship, transfer or
+export the SDK into any country, or use the SDK in any manner,
+prohibited by the United States Bureau of Industry and
+Security or economic sanctions regulations administered by the
+U.S. Department of Treasury’s Office of Foreign Assets
+Control (OFAC), or any applicable export laws, restrictions or
+regulations. These laws include restrictions on destinations,
+end users and end use. By accepting this Agreement, you
+confirm that you are not a resident or citizen of any country
+currently embargoed by the U.S. and that you are not otherwise
+prohibited from receiving the SDK.
+
+Any notice delivered by NVIDIA to you under this Agreement
+will be delivered via mail, email or fax. You agree that any
+notices that NVIDIA sends you electronically will satisfy any
+legal communication requirements. Please direct your legal
+notices or other correspondence to NVIDIA Corporation, 2788
+San Tomas Expressway, Santa Clara, California 95051, United
+States of America, Attention: Legal Department.
+
+This Agreement and any exhibits incorporated into this
+Agreement constitute the entire agreement of the parties with
+respect to the subject matter of this Agreement and supersede
+all prior negotiations or documentation exchanged between the
+parties relating to this SDK license. Any additional and/or
+conflicting terms on documents issued by you are null, void,
+and invalid. Any amendment or waiver under this Agreement
+shall be in writing and signed by representatives of both
+parties.
+
+
+2. CUDA Toolkit Supplement to Software License Agreement for
+NVIDIA Software Development Kits
+------------------------------------------------------------
+
+
+Release date: August 16, 2018
+-----------------------------
+
+The terms in this supplement govern your use of the NVIDIA
+CUDA Toolkit SDK under the terms of your license agreement
+(“Agreement”) as modified by this supplement. Capitalized
+terms used but not defined below have the meaning assigned to
+them in the Agreement.
+
+This supplement is an exhibit to the Agreement and is
+incorporated as an integral part of the Agreement. In the
+event of conflict between the terms in this supplement and the
+terms in the Agreement, the terms in this supplement govern.
+
+
+2.1. License Scope
+
+The SDK is licensed for you to develop applications only for
+use in systems with NVIDIA GPUs.
+
+
+2.2. Distribution
+
+The portions of the SDK that are distributable under the
+Agreement are listed in Attachment A.
+
+
+2.3. Operating Systems
+
+Those portions of the SDK designed exclusively for use on the
+Linux or FreeBSD operating systems, or other operating systems
+derived from the source code to these operating systems, may
+be copied and redistributed for use in accordance with this
+Agreement, provided that the object code files are not
+modified in any way (except for unzipping of compressed
+files).
+
+
+2.4. Audio and Video Encoders and Decoders
+
+You acknowledge and agree that it is your sole responsibility
+to obtain any additional third-party licenses required to
+make, have made, use, have used, sell, import, and offer for
+sale your products or services that include or incorporate any
+third-party software and content relating to audio and/or
+video encoders and decoders from, including but not limited
+to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
+MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
+under this Agreement any necessary patent or other rights with
+respect to any audio and/or video encoders and decoders.
+
+
+2.5. Licensing
+
+If the distribution terms in this Agreement are not suitable
+for your organization, or for any questions regarding this
+Agreement, please contact NVIDIA at
+nvidia-compute-license-questions@nvidia.com.
+
+
+2.6. Attachment A
+
+The following portions of the SDK are distributable under the
+Agreement:
+
+Component
+
+CUDA Runtime
+
+Windows
+
+cudart.dll, cudart_static.lib, cudadevrt.lib
+
+Mac OSX
+
+libcudart.dylib, libcudart_static.a, libcudadevrt.a
+
+Linux
+
+libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Android
+
+libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Component
+
+CUDA FFT Library
+
+Windows
+
+cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+
+Mac OSX
+
+libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+libcufftw_static.a
+
+Linux
+
+libcufft.so, libcufft_static.a, libcufftw.so,
+libcufftw_static.a
+
+Android
+
+libcufft.so, libcufft_static.a, libcufftw.so,
+libcufftw_static.a
+
+Component
+
+CUDA BLAS Library
+
+Windows
+
+cublas.dll, cublasLt.dll
+
+Mac OSX
+
+libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+libcublasLt_static.a
+
+Linux
+
+libcublas.so, libcublasLt.so, libcublas_static.a,
+libcublasLt_static.a
+
+Android
+
+libcublas.so, libcublasLt.so, libcublas_static.a,
+libcublasLt_static.a
+
+Component
+
+NVIDIA "Drop-in" BLAS Library
+
+Windows
+
+nvblas.dll
+
+Mac OSX
+
+libnvblas.dylib
+
+Linux
+
+libnvblas.so
+
+Component
+
+CUDA Sparse Matrix Library
+
+Windows
+
+cusparse.dll, cusparse.lib
+
+Mac OSX
+
+libcusparse.dylib, libcusparse_static.a
+
+Linux
+
+libcusparse.so, libcusparse_static.a
+
+Android
+
+libcusparse.so, libcusparse_static.a
+
+Component
+
+CUDA Linear Solver Library
+
+Windows
+
+cusolver.dll, cusolver.lib
+
+Mac OSX
+
+libcusolver.dylib, libcusolver_static.a
+
+Linux
+
+libcusolver.so, libcusolver_static.a
+
+Android
+
+libcusolver.so, libcusolver_static.a
+
+Component
+
+CUDA Random Number Generation Library
+
+Windows
+
+curand.dll, curand.lib
+
+Mac OSX
+
+libcurand.dylib, libcurand_static.a
+
+Linux
+
+libcurand.so, libcurand_static.a
+
+Android
+
+libcurand.so, libcurand_static.a
+
+Component
+
+CUDA Accelerated Graph Library
+
+Component
+
+NVIDIA Performance Primitives Library
+
+Windows
+
+nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+
+Mac OSX
+
+libnppc.dylib, libnppc_static.a, libnppial.dylib,
+libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+libnppig.dylib, libnppig_static.a, libnppim.dylib,
+libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
+libnpps.dylib, libnpps_static.a
+
+Linux
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a,
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so,
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Android
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a,
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so,
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Component
+
+NVIDIA JPEG Library
+
+Linux
+
+libnvjpeg.so, libnvjpeg_static.a
+
+Component
+
+Internal common library required for statically linking to
+cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+
+Mac OSX
+
+libculibos.a
+
+Linux
+
+libculibos.a
+
+Component
+
+NVIDIA Runtime Compilation Library and Header
+
+All
+
+nvrtc.h
+
+Windows
+
+nvrtc.dll, nvrtc-builtins.dll
+
+Mac OSX
+
+libnvrtc.dylib, libnvrtc-builtins.dylib
+
+Linux
+
+libnvrtc.so, libnvrtc-builtins.so
+
+Component
+
+NVIDIA Optimizing Compiler Library
+
+Windows
+
+nvvm.dll
+
+Mac OSX
+
+libnvvm.dylib
+
+Linux
+
+libnvvm.so
+
+Component
+
+NVIDIA Common Device Math Functions Library
+
+Windows
+
+libdevice.10.bc
+
+Mac OSX
+
+libdevice.10.bc
+
+Linux
+
+libdevice.10.bc
+
+Component
+
+CUDA Occupancy Calculation Header Library
+
+All
+
+cuda_occupancy.h
+
+Component
+
+CUDA Half Precision Headers
+
+All
+
+cuda_fp16.h, cuda_fp16.hpp
+
+Component
+
+CUDA Profiling Tools Interface (CUPTI) Library
+
+Windows
+
+cupti.dll
+
+Mac OSX
+
+libcupti.dylib
+
+Linux
+
+libcupti.so
+
+Component
+
+NVIDIA Tools Extension Library
+
+Windows
+
+nvToolsExt.dll, nvToolsExt.lib
+
+Mac OSX
+
+libnvToolsExt.dylib
+
+Linux
+
+libnvToolsExt.so
+
+Component
+
+NVIDIA CUDA Driver Libraries
+
+Linux
+
+libcuda.so, libnvidia-fatbinaryloader.so,
+libnvidia-ptxjitcompiler.so
+
+The NVIDIA CUDA Driver Libraries are only distributable in
+applications that meet the following criteria:
+
+ 1. The application was developed starting from a NVIDIA CUDA
+ container obtained from Docker Hub or the NVIDIA GPU
+ Cloud, and
+
+ 2. The resulting application is packaged as a Docker
+ container and distributed to users on Docker Hub or the
+ NVIDIA GPU Cloud only.
+
+
+2.7. Attachment B
+
+
+Additional Licensing Obligations
+
+The following third party components included in the SOFTWARE
+are licensed to Licensee pursuant to the following terms and
+conditions:
+
+ 1. Licensee's use of the GDB third party component is
+ subject to the terms and conditions of GNU GPL v3:
+
+ This product includes copyrighted third-party software licensed
+ under the terms of the GNU General Public License v3 ("GPL v3").
+ All third-party software packages are copyright by their respective
+ authors. GPL v3 terms and conditions are hereby incorporated into
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+ Consistent with these licensing requirements, the software
+ listed below is provided under the terms of the specified
+ open source software licenses. To obtain source code for
+ software provided under licenses that require
+ redistribution of source code, including the GNU General
+ Public License (GPL) and GNU Lesser General Public License
+ (LGPL), contact oss-requests@nvidia.com. This offer is
+ valid for a period of three (3) years from the date of the
+ distribution of this product by NVIDIA CORPORATION.
+
+ Component License
+ CUDA-GDB GPL v3
+
+ 2. Licensee represents and warrants that any and all third
+ party licensing and/or royalty payment obligations in
+ connection with Licensee's use of the H.264 video codecs
+ are solely the responsibility of Licensee.
+
+ 3. Licensee's use of the Thrust library is subject to the
+ terms and conditions of the Apache License Version 2.0.
+ All third-party software packages are copyright by their
+ respective authors. Apache License Version 2.0 terms and
+ conditions are hereby incorporated into the Agreement by
+ this reference.
+ http://www.apache.org/licenses/LICENSE-2.0.html
+
+ In addition, Licensee acknowledges the following notice:
+ Thrust includes source code from the Boost Iterator,
+ Tuple, System, and Random Number libraries.
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 4. Licensee's use of the LLVM third party component is
+ subject to the following terms and conditions:
+
+ ======================================================
+ LLVM Release License
+ ======================================================
+ University of Illinois/NCSA
+ Open Source License
+
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal with the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
+ Champaign, nor the names of its contributors may be used to endorse or
+ promote products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS WITH THE SOFTWARE.
+
+ 5. Licensee's use of the PCRE third party component
+ (e.g. in nvprof) is subject to the following terms and
+ conditions:
+
+ ------------
+ PCRE LICENCE
+ ------------
+ PCRE is a library of functions to support regular expressions whose syntax
+ and semantics are as close as possible to those of the Perl 5 language.
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+ specified below. The documentation for PCRE, supplied in the "doc"
+ directory, is distributed under the same terms as the software itself. The
+ basic library functions are written in C and are freestanding. Also
+ included in the distribution is a set of C++ wrapper functions, and a just-
+ in-time compiler that can be used to optimize pattern matching. These are
+ both optional features that can be omitted when the library is built.
+
+ THE BASIC LIBRARY FUNCTIONS
+ ---------------------------
+ Written by: Philip Hazel
+ Email local part: ph10
+ Email domain: cam.ac.uk
+ University of Cambridge Computing Service,
+ Cambridge, England.
+ Copyright (c) 1997-2012 University of Cambridge
+ All rights reserved.
+
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
+ -------------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright(c) 2010-2012 Zoltan Herczeg
+ All rights reserved.
+
+ STACK-LESS JUST-IN-TIME COMPILER
+ --------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright(c) 2009-2012 Zoltan Herczeg
+ All rights reserved.
+
+ THE C++ WRAPPER FUNCTIONS
+ -------------------------
+ Contributed by: Google Inc.
+ Copyright (c) 2007-2012, Google Inc.
+ All rights reserved.
+
+ THE "BSD" LICENCE
+ -----------------
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Google
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 6. Some of the cuBLAS library routines were written by or
+ derived from code written by Vasily Volkov and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2007-2009, Regents of the University of California
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the University of California, Berkeley nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 7. Some of the cuBLAS library routines were written by or
+ derived from code written by Davide Barbieri and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 8. Some of the cuBLAS library routines were derived from
+ code developed by the University of Tennessee and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2010 The University of Tennessee.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer listed in this license in the documentation and/or
+ other materials provided with the distribution.
+ * Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 9. Some of the cuBLAS library routines were written by or
+ derived from code written by Jonathan Hogg and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the STFC nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 10. Some of the cuBLAS library routines were written by or
+ derived from code written by Ahmad M. Abdelfattah, David
+ Keyes, and Hatem Ltaief, and are subject to the Apache
+ License, Version 2.0, as follows:
+
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
+ Authors:
+ Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa)
+ David Keyes (david.keyes@kaust.edu.sa)
+ Hatem Ltaief (hatem.ltaief@kaust.edu.sa)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the King Abdullah University of Science and
+ Technology nor the names of its contributors may be used to endorse
+ or promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
+
+ 11. Some of the cuSPARSE library routines were written by or
+ derived from code written by Li-Wen Chang and are subject
+ to the NCSA Open Source License as follows:
+
+ Copyright (c) 2012, University of Illinois.
+
+ All rights reserved.
+
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal with the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimers in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the names of IMPACT Group, University of Illinois, nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ SOFTWARE.
+
+ 12. Some of the cuRAND library routines were written by or
+ derived from code written by Mutsuo Saito and Makoto
+ Matsumoto and are subject to the following license:
+
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ University. All rights reserved.
+
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
+ University and University of Tokyo. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the Hiroshima University nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 13. Some of the cuRAND library routines were derived from
+ code developed by D. E. Shaw Research and are subject to
+ the following license:
+
+ Copyright 2010-2011, D. E. Shaw Research.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions, and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions, and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of D. E. Shaw Research nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 14. Some of the Math library routines were written by or
+ derived from code developed by Norbert Juffa and are
+ subject to the following license:
+
+ Copyright (c) 2015-2017, Norbert Juffa
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 15. Licensee's use of the lz4 third party component is
+ subject to the following terms and conditions:
+
+ Copyright (C) 2011-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 16. The NPP library uses code from the Boost Math Toolkit,
+ and is subject to the following license:
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 17. Portions of the Nsight Eclipse Edition are subject to the
+ following license:
+
+ The Eclipse Foundation makes available all content in this plug-in
+ ("Content"). Unless otherwise indicated below, the Content is provided
+ to you under the terms and conditions of the Eclipse Public License
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
+ will mean the Content.
+
+ If you did not receive this Content directly from the Eclipse
+ Foundation, the Content is being redistributed by another party
+ ("Redistributor") and different terms and conditions may apply to your
+ use of any object code in the Content. Check the Redistributor's
+ license that was provided with the Content. If no such license exists,
+ contact the Redistributor. Unless otherwise indicated below, the terms
+ and conditions of the EPL still apply to any source code in the
+ Content and such source code may be obtained at http://www.eclipse.org.
+
+ 18. Some of the cuBLAS library routines use code from
+ OpenAI, which is subject to the following license:
+
+ License URL
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
+
+ License Text
+ The MIT License
+
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+ 19. Licensee's use of the Visual Studio Setup Configuration
+ Samples is subject to the following license:
+
+ The MIT License (MIT)
+ Copyright (C) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge,
+ publish, distribute, sublicense, and/or sell copies of the Software,
+ and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ 20. Licensee's use of linmath.h header for CPU functions for
+ GL vector/matrix operations from lunarG is subject to the
+ Apache License Version 2.0.
+
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
+ subject to the MIT license .
+
+-----------------
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/METADATA b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..f68ecd51e33433972513aa313409942fd0752924
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/METADATA
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: nvidia-cuda-runtime-cu12
+Version: 12.1.105
+Summary: CUDA Runtime native Libraries
+Home-page: https://developer.nvidia.com/cuda-zone
+Author: Nvidia CUDA Installer Team
+Author-email: cuda_installer@nvidia.com
+License: NVIDIA Proprietary Software
+Keywords: cuda,nvidia,runtime,machine learning,deep learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3
+License-File: License.txt
+
+CUDA Runtime native Libraries
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/RECORD b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..f1fb37d2bd92efe9078242a7fe9c0bf62df90687
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/RECORD
@@ -0,0 +1,106 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_runtime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_runtime/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_runtime/include/builtin_types.h,sha256=JxT9Vf2q2snxTBOL9ACzNmYzTWACO2VOVUu1KdFt7_g,3150
+nvidia/cuda_runtime/include/channel_descriptor.h,sha256=no_vNky02LeMLI0CF8GDVGHaPm_uRUGcVUMYdt_Xn4U,21482
+nvidia/cuda_runtime/include/common_functions.h,sha256=22LTZRVcPZzEH6MJda7nNMCvMgIjSTe0OKR7sEQj6kc,3410
+nvidia/cuda_runtime/include/cooperative_groups.h,sha256=JCMxtl4cNUFnymguM4_bTywhcfyxGqu_zOQIUh_Tc_g,59328
+nvidia/cuda_runtime/include/cooperative_groups/details/async.h,sha256=xsEHCZP3nuEY3l2p8SU2d1226XiXumUvDP_Gyh8PdVY,19122
+nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h,sha256=vWv1tyxMjSMM2Oc0SdxXhCug_PwaBM6u8iMLjKyeqjE,4561
+nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h,sha256=DfZv5d5W0XJv-tZVhgrIdjLjs6aCx_u0oy1lDIpjo1Q,7314
+nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h,sha256=v-ZUb4UgGKJk6NR2WCWHD3x_42y-togI1urFn70Gi-g,3964
+nvidia/cuda_runtime/include/cooperative_groups/details/functional.h,sha256=2BV8i8Bidz0kgxuYkJCAbwFxOIZRyzHgG-c_rVKhRzc,8905
+nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h,sha256=GHIy-8awPZObSmP_FFWAnX7RQG9s1SD6L3yw5dNzhlM,23569
+nvidia/cuda_runtime/include/cooperative_groups/details/info.h,sha256=LSrEe6iTHuQRfc7RD3EHQbNqpED8eAbX4HLNyxXgKFA,12286
+nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h,sha256=Osq3K-tZuXHVCMQJ708PjPo-BwMhjhjApO4b0TYLFJg,8616
+nvidia/cuda_runtime/include/cooperative_groups/details/memory.h,sha256=WU28eUcYLA1z131VYGulR4eVCSN9xK9KSxbV656YPs0,5484
+nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h,sha256=8hCh6F8sfkyfRgMirC37Nqv-b-gIY3A_J0eMYqmD2zU,6001
+nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h,sha256=cbDjVSJVQ2_2pf2aP-X_rBkRVEWEFSYkc6oCx1fRQsQ,22744
+nvidia/cuda_runtime/include/cooperative_groups/details/scan.h,sha256=-Ttwb2AfEEY_tsmqJjR2dojkPpoRx387SoqxgvfdBtQ,17166
+nvidia/cuda_runtime/include/cooperative_groups/details/sync.h,sha256=fIEOjj7h3MoPqSOP3rkTNeedgS-0ZRkAHkzTAJDwJUA,10340
+nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h,sha256=erOIHuObdfxRhBWfrXE3wsZF4B2GUuqwzQrsPwKPpbg,2960
+nvidia/cuda_runtime/include/cooperative_groups/reduce.h,sha256=B0hgDkqM-6ueqTTgb3b34A0RH4vGz8mBf5e2jT1dJ1o,2949
+nvidia/cuda_runtime/include/cooperative_groups/scan.h,sha256=2EU6T5cWNwftm2B7FicV31PojoI61yo5fHXGRYkGk40,2940
+nvidia/cuda_runtime/include/cuComplex.h,sha256=WpcgpaiPhU_o9sTPMcNTEZuyXDIc8x3sz4dUWSztL2g,12186
+nvidia/cuda_runtime/include/cuda.h,sha256=CAY_j4D9qgEVLDxRftRObCI-vCpTTkSrLq9zBRVNwJI,933009
+nvidia/cuda_runtime/include/cudaEGL.h,sha256=_CwaQ4cEP1vfNyBSSd5qFxznPCYOovF6Cpj-QWSIBq4,39544
+nvidia/cuda_runtime/include/cudaEGLTypedefs.h,sha256=xF_FAN1Kar9oyHJ3cCU7jztTpxX8WylpiuYyYpGGHek,5645
+nvidia/cuda_runtime/include/cudaGL.h,sha256=gMT1HPGa-siuji0gAsKYr4X45Lc29HKglC_ttNSGyUM,22501
+nvidia/cuda_runtime/include/cudaGLTypedefs.h,sha256=dClpQI-LuXgF9rPSBsj7OkIg8g_fXDjT0hLZS8TGpOg,6576
+nvidia/cuda_runtime/include/cudaProfilerTypedefs.h,sha256=F2aWLIKv_AhNbxNOaZVcRsxIh0kuscnV8UMWWxkBAlY,3297
+nvidia/cuda_runtime/include/cudaTypedefs.h,sha256=guPSBrr4jNU6WBaBCqRiTuCbOYQDLMqYoz7bemoofWI,101855
+nvidia/cuda_runtime/include/cudaVDPAU.h,sha256=Np7Nc2Wjaz--hkpbhW6f9aapr-NbcPDAgkot0sJerco,12694
+nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h,sha256=wz8nyOUdwM9mH9JO3QZW-A9dyxt-IufSX7nggSXpCNs,4144
+nvidia/cuda_runtime/include/cuda_awbarrier.h,sha256=3ZH-ZlXODhSiwSY9rqSni_EQwi25QMHP6Tm-zOdxBwE,9340
+nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h,sha256=OCskCts5bCKl_RKBe9M74zKSIsVpePn44S_aJp1tFXE,12489
+nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h,sha256=n5__E1jYYDhlgH-f3u8MQjtz57UZ7v5VshhMye1eicM,4699
+nvidia/cuda_runtime/include/cuda_bf16.h,sha256=zQ2idK7w0w7tRlgL_5Nyvy3FSrsOnu2W9Ya2YSFFT-E,149245
+nvidia/cuda_runtime/include/cuda_bf16.hpp,sha256=JlKs0yOZvJQPM6yFO3klxrhPTNN83e7um3ZFHMlYOKI,104876
+nvidia/cuda_runtime/include/cuda_device_runtime_api.h,sha256=2ZuNnXkRON3VchHM-OcKCdhljApIlfh-xKwupEfp5N4,39755
+nvidia/cuda_runtime/include/cuda_egl_interop.h,sha256=PNWYns30MIytJQHSOh7UbZYlaTX5e0bavzK14tde_C8,37109
+nvidia/cuda_runtime/include/cuda_fp16.h,sha256=aFLciB3o9QH3trYFl_P_dX58342UPfHXZGy2OeqEv1s,141782
+nvidia/cuda_runtime/include/cuda_fp16.hpp,sha256=kyHQA_rvZWpGq7sb2k3iTT3Zs1KYtr7TlEVvkGbQ61E,98606
+nvidia/cuda_runtime/include/cuda_fp8.h,sha256=Q3OP5o_3rSYbKtVIlcXVr_CncU3SPM-09j605e2Zegw,13833
+nvidia/cuda_runtime/include/cuda_fp8.hpp,sha256=pgYF_hzC2uAr7KNVyxBqrHTuM2bMaUPMUj7cY0kG3OU,56491
+nvidia/cuda_runtime/include/cuda_gl_interop.h,sha256=VQEswFeOBF6JN6Q0pdlkvc5WT7bD1FnTfKewvANulCc,19150
+nvidia/cuda_runtime/include/cuda_occupancy.h,sha256=Kr9HyOe-hlRjBAzbINwUYkNgbbIgIjuvKs09UZhMYQo,67179
+nvidia/cuda_runtime/include/cuda_pipeline.h,sha256=0enXG49wN4JajlQi3ahbp2ei_ufTY_Mznic7zfWmKHM,8130
+nvidia/cuda_runtime/include/cuda_pipeline_helpers.h,sha256=bo1L7e6vCuM-K3Il8K1z4wJUja5DyXQKdo_hSWUME-E,13852
+nvidia/cuda_runtime/include/cuda_pipeline_primitives.h,sha256=FnJJtuV6rHr6LgL56XDwilcSbFr6W1Hj6mf1AJaMI20,8675
+nvidia/cuda_runtime/include/cuda_runtime.h,sha256=NKUshOJapRWSe0CPJx-KllF9y3ZibUd9bM1OVUU52H4,88281
+nvidia/cuda_runtime/include/cuda_runtime_api.h,sha256=rUQw7deoB1R5LKEgLKwqFdoX6eke4bcp98CJ6iCk_uk,560622
+nvidia/cuda_runtime/include/cuda_surface_types.h,sha256=Mw5Lo4b8Q-f9mogOvATGyHhu9d2t2K6XOxuqtZrSh3A,3688
+nvidia/cuda_runtime/include/cuda_texture_types.h,sha256=ITbX-JNnP7Rm-JSgNVdJ9pq6k8FVor8RbnruDsKq6sk,3688
+nvidia/cuda_runtime/include/cuda_vdpau_interop.h,sha256=bXQanWc2IFXZAKWNGl2xAz9nLvFmQpWyGrsDvfeS9FA,7727
+nvidia/cuda_runtime/include/cudart_platform.h,sha256=YN6sKhB0b9w5tGX1IYL7ulJVPrWAiX9A44qLv4EtW5Q,2717
+nvidia/cuda_runtime/include/device_atomic_functions.h,sha256=o448l6Ep35UHnqcPSQXICvK4Vusc9mVjkyQDq0vV14E,11883
+nvidia/cuda_runtime/include/device_atomic_functions.hpp,sha256=_UsoVsyP7U-9CUUCbC1QLw6IbFFkKzxk458vLbAXzOY,8149
+nvidia/cuda_runtime/include/device_double_functions.h,sha256=KUxId5Z1fx8SWfLRTxPD7RB-zN7zslzb4n7JaJLfL3I,3452
+nvidia/cuda_runtime/include/device_functions.h,sha256=bWSrhTYE9NQlss7xMSMEVusvto9j2fgUDXWVH2W_cOA,3410
+nvidia/cuda_runtime/include/device_launch_parameters.h,sha256=H1_CC-vvAaS26ys4XsTFkMgTxUTciAjdjswjizkisvQ,3846
+nvidia/cuda_runtime/include/device_types.h,sha256=2LFxoZBJPoA5V0H1EbKTEaXDi3GDJPtzOPdRHDaucIQ,3588
+nvidia/cuda_runtime/include/driver_functions.h,sha256=cN3IjRAz2Mj2Pj35SyxJIkZNDDusnJqaqzBdMzpQKbA,4625
+nvidia/cuda_runtime/include/driver_types.h,sha256=Oti6YeNU-DHsXp6r1wu5JSIGOUWgCXiED-N0DEWVlK0,144785
+nvidia/cuda_runtime/include/host_config.h,sha256=BscH_GazAZbbotddVzL5RmafbQ-QjRx8f-I1O01IBW8,3380
+nvidia/cuda_runtime/include/host_defines.h,sha256=bBQwQF5C1N1c2qpLV56g1c-weu9Ysgz-gIf2Kn3uz_A,3386
+nvidia/cuda_runtime/include/library_types.h,sha256=yJvoLFw5oBdRqkQgEhIaX-stsMGlxQW9sZoJ4vbQHwI,4766
+nvidia/cuda_runtime/include/math_constants.h,sha256=cV6hAyQe8X7f7MBtaKjjIJq3BycOUDp6I5cizJX5HLw,7608
+nvidia/cuda_runtime/include/math_functions.h,sha256=5XcC6j-fJKttvhwc4hZNoLHNw808a2ZYIOtZ7ry7yd0,3398
+nvidia/cuda_runtime/include/mma.h,sha256=IY_VenxuEncwGq92MhrWUb-Xswh0ekAXLy9Rbxhxa2Y,2932
+nvidia/cuda_runtime/include/sm_20_atomic_functions.h,sha256=j5zuwIb71KmDLf43RoOjwiudGYvBk_k2PRsj8sy5xXI,4942
+nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp,sha256=Cx__BPJKUPeG5qMxZs9ztfIyqWqt0wZDZi4V_5EV4LQ,3929
+nvidia/cuda_runtime/include/sm_20_intrinsics.h,sha256=cQbeg-K9zWgOI4jAVeUmV1WiWOMF5sHPz_nb3CWdAjU,51052
+nvidia/cuda_runtime/include/sm_20_intrinsics.hpp,sha256=BhEBuXSKBsNGJDBJDtYL0cGRI3wX_w_OIgA5D-YxIWk,7694
+nvidia/cuda_runtime/include/sm_30_intrinsics.h,sha256=kafRv2e_iMvwNfGEP5yIyjRBFx97tdkpT5me9RvbOuo,16375
+nvidia/cuda_runtime/include/sm_30_intrinsics.hpp,sha256=yX0ebd265tJ-BDhvluP2BhadPuWXpRZPI2eeQFFt5ys,24567
+nvidia/cuda_runtime/include/sm_32_atomic_functions.h,sha256=V1VteWKbW09qoItfQp0DbHj7R_e3bxX24NRGnd18Jc4,6812
+nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp,sha256=HcKoB3ujG_AVTzIaD_MjRCaaRZL8khqI_cJqHwCaP5g,5416
+nvidia/cuda_runtime/include/sm_32_intrinsics.h,sha256=o7IwBBKu2lDZwzHHb2pOLAvyCNpCoEKSHPt0dFaSspI,33390
+nvidia/cuda_runtime/include/sm_32_intrinsics.hpp,sha256=Gl8aSLDLcit4W3pKQS19GsDG8RYcwD65HwYB_CeZe8M,70616
+nvidia/cuda_runtime/include/sm_35_atomic_functions.h,sha256=a3XoEsKRCEOf0Q_5Y__rMfmC4pScv4VkUggVgVJVn44,2909
+nvidia/cuda_runtime/include/sm_35_intrinsics.h,sha256=BEiPNO03ZSv5XtMMul5jiTH4oLWlOu3CYkIAgrWslnk,2952
+nvidia/cuda_runtime/include/sm_60_atomic_functions.h,sha256=E5nwZxyIL48AMUIFxZmwzfWaPXOMpjJsoEIQcY7LzPM,20902
+nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp,sha256=bSnj2_G8asEbiu8aPuf3OACDuT_-kw6TuBlU1QtLLfY,15081
+nvidia/cuda_runtime/include/sm_61_intrinsics.h,sha256=eEL9MmGSOpD9DohErXPflc0k2loEcMzDVKZYiUZx7hY,6030
+nvidia/cuda_runtime/include/sm_61_intrinsics.hpp,sha256=N-nQvcBsPMT2Umy5zR69c9K1q366W-Jqe7NpoLTqTmg,6787
+nvidia/cuda_runtime/include/surface_functions.h,sha256=b1O82SAvEgWWxA9uZTWQcGimzZUoem2QbAET3wh3fZc,6782
+nvidia/cuda_runtime/include/surface_indirect_functions.h,sha256=vy9QuFVV-ezZP-x2RT9RLp2qIUgdngACOCmalSfVFPA,10877
+nvidia/cuda_runtime/include/surface_types.h,sha256=Di766cyRUqNN4JkOnYM3teFqrwMZ02hXMDB_R_2_vz4,4460
+nvidia/cuda_runtime/include/texture_fetch_functions.h,sha256=KLCmUxf5aY5_UalX8tSFB6e4TrjA8hyUPxLOkMFltAo,12468
+nvidia/cuda_runtime/include/texture_indirect_functions.h,sha256=lH_y3Ni-hq4RZ0_PMFbBM0th5-OmTn3TtqtpkHHhA8w,21163
+nvidia/cuda_runtime/include/texture_types.h,sha256=cFqQ6sC4y79Q6YxjLSY_bknwMgKJAOwPdKDARLPFrDI,6290
+nvidia/cuda_runtime/include/vector_functions.h,sha256=R5plWOkFciltO_AS5if8NcmsgDp3cFNq6zFFDd3oofk,7847
+nvidia/cuda_runtime/include/vector_functions.hpp,sha256=afXhNSd3LFTZo96EPtesTLfvxd4nTmLVzgkj967rTRg,10060
+nvidia/cuda_runtime/include/vector_types.h,sha256=ruVFRp8RioWR9mrvLXX9S15ZSJ97wqTjA8ORCJKKzOQ,13206
+nvidia/cuda_runtime/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_runtime/lib/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_runtime/lib/libcudart.so.12,sha256=kzX2opypEBDi2p9A6C-0so46SuIv04XhKT6TvzxGyeY,679264
+nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_cuda_runtime_cu12-12.1.105.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_cuda_runtime_cu12-12.1.105.dist-info/METADATA,sha256=9_P2cqjEZ020xhA9HzY1klL7YVuIjKETxs1wKAXeYoQ,1507
+nvidia_cuda_runtime_cu12-12.1.105.dist-info/RECORD,,
+nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt
@@ -0,0 +1 @@
+nvidia
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b5badf1eebfa8d0726e8f64115ffb70a3e243fe
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..672f98542ed4c13d7c50a96d9cf627f4ca46fa7f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ba4f68204d5cf58610d2664d0b5cd63a792101b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..229097ad4cbf0eb32a89ef6be45b4d060e8e94f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8755642a9810b09625a8b3b96267f214c1eb28d7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1115d9964a4ecc098f775281ed5d92e350063f58
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ab36cb0c388ded2f92f65213401e0e3fba84f26
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..248b2b08e0e4596447ad8a914ca37b26cee81d8f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f48320301d7395671d6b8cdea9e5dae99d5b970f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..afc79b8ef9b11dbf0e72e9f743654772d2ce2529
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e1678172d86f717b3a58fabab45bdcd4d251e78
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92f717d0094512b937fadc75bd3b68dfc14a81bc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__init__.py b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..71795b61d819573ff41770e6d49c750e6c51b0ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config import PromptTuningConfig, PromptTuningInit
+from .model import PromptEmbedding
+
+
+__all__ = ["PromptTuningConfig", "PromptEmbedding", "PromptTuningInit"]
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b809919ccf18cb45987fdb90d8b1a9082400da9f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72b74dca4cf286f8781ac795bf449f98ae415d29
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c0f3ffcdf4583f3d3f5c75948891abf7dd7341d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/config.py b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9987e112abc389f623a6ae4f7df90bd70c8439a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/config.py
@@ -0,0 +1,86 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum
+from dataclasses import dataclass, field
+from typing import Optional, Union
+
+from peft.config import PromptLearningConfig
+from peft.utils import PeftType
+
+
+class PromptTuningInit(str, enum.Enum):
+ TEXT = "TEXT"
+ RANDOM = "RANDOM"
+
+
+@dataclass
+class PromptTuningConfig(PromptLearningConfig):
+ """
+ This is the configuration class to store the configuration of a [`PromptEmbedding`].
+
+ Args:
+ prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding.
+ prompt_tuning_init_text (`str`, *optional*):
+ The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`.
+ tokenizer_name_or_path (`str`, *optional*):
+ The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`.
+ tokenizer_kwargs (`dict`, *optional*):
+ The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if `prompt_tuning_init` is
+ `TEXT`.
+ """
+
+ prompt_tuning_init: Union[PromptTuningInit, str] = field(
+ default=PromptTuningInit.RANDOM,
+ metadata={"help": "How to initialize the prompt tuning parameters"},
+ )
+ prompt_tuning_init_text: Optional[str] = field(
+ default=None,
+ metadata={
+ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
+ },
+ )
+ tokenizer_name_or_path: Optional[str] = field(
+ default=None,
+ metadata={
+ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
+ },
+ )
+
+ tokenizer_kwargs: Optional[dict] = field(
+ default=None,
+ metadata={
+ "help": (
+ "The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if prompt_tuning_init is "
+ "`TEXT`"
+ ),
+ },
+ )
+
+ def __post_init__(self):
+ self.peft_type = PeftType.PROMPT_TUNING
+ if (self.prompt_tuning_init == PromptTuningInit.TEXT) and not self.tokenizer_name_or_path:
+ raise ValueError(
+ f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', "
+ f"tokenizer_name_or_path can't be {self.tokenizer_name_or_path}."
+ )
+ if (self.prompt_tuning_init == PromptTuningInit.TEXT) and self.prompt_tuning_init_text is None:
+ raise ValueError(
+ f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', "
+ f"prompt_tuning_init_text can't be {self.prompt_tuning_init_text}."
+ )
+ if self.tokenizer_kwargs and (self.prompt_tuning_init != PromptTuningInit.TEXT):
+ raise ValueError(
+ f"tokenizer_kwargs only valid when using prompt_tuning_init='{PromptTuningInit.TEXT.value}'."
+ )
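+
+
+# --- Illustrative usage sketch (not part of the upstream peft module) ---
+# A minimal, hedged example of the TEXT initialization path validated in __post_init__
+# above; the tokenizer name "gpt2" and the init text are assumptions for illustration only.
+if __name__ == "__main__":
+    example_config = PromptTuningConfig(
+        task_type="CAUSAL_LM",
+        num_virtual_tokens=16,
+        prompt_tuning_init=PromptTuningInit.TEXT,
+        prompt_tuning_init_text="Classify the sentiment of this review:",
+        tokenizer_name_or_path="gpt2",
+    )
+    # peft_type is set automatically by __post_init__
+    print(example_config.peft_type)  # PeftType.PROMPT_TUNING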
diff --git a/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/model.py b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..a04221c2abfd1fb806df2805a7a28e4e3073a32d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/tuners/prompt_tuning/model.py
@@ -0,0 +1,89 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+import torch
+
+from .config import PromptTuningInit
+
+
+class PromptEmbedding(torch.nn.Module):
+ """
+ The model to encode virtual tokens into prompt embeddings.
+
+ Args:
+ config ([`PromptTuningConfig`]): The configuration of the prompt embedding.
+ word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.
+
+ **Attributes**:
+ - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.
+
+ Example:
+
+ ```py
+ >>> from peft import PromptEmbedding, PromptTuningConfig
+
+ >>> config = PromptTuningConfig(
+ ... peft_type="PROMPT_TUNING",
+ ... task_type="SEQ_2_SEQ_LM",
+ ... num_virtual_tokens=20,
+ ... token_dim=768,
+ ... num_transformer_submodules=1,
+ ... num_attention_heads=12,
+ ... num_layers=12,
+ ... prompt_tuning_init="TEXT",
+ ... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral",
+ ... tokenizer_name_or_path="t5-base",
+ ... )
+
+ >>> # t5_model.shared is the word embeddings of the base model
+ >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)
+ ```
+
+ Input Shape: (`batch_size`, `total_virtual_tokens`)
+
+ Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
+ """
+
+ def __init__(self, config, word_embeddings):
+ super().__init__()
+
+ total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
+ self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)
+ if config.prompt_tuning_init == PromptTuningInit.TEXT and not config.inference_mode:
+ from transformers import AutoTokenizer
+
+ tokenizer_kwargs = config.tokenizer_kwargs or {}
+ tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs)
+ init_text = config.prompt_tuning_init_text
+ init_token_ids = tokenizer(init_text)["input_ids"]
+            # Trim or repeat the init token ids until num_text_tokens matches total_virtual_tokens
+ num_text_tokens = len(init_token_ids)
+ if num_text_tokens > total_virtual_tokens:
+ init_token_ids = init_token_ids[:total_virtual_tokens]
+ elif num_text_tokens < total_virtual_tokens:
+ num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
+ init_token_ids = init_token_ids * num_reps
+ init_token_ids = init_token_ids[:total_virtual_tokens]
+ init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device)
+
+ word_embedding_weights = word_embeddings(init_token_ids).detach().clone()
+ word_embedding_weights = word_embedding_weights.to(torch.float32)
+ self.embedding.weight = torch.nn.Parameter(word_embedding_weights)
+
+ def forward(self, indices):
+ # Just get embeddings
+ prompt_embeddings = self.embedding(indices)
+ return prompt_embeddings
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__init__.py b/venv/lib/python3.10/site-packages/peft/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d84d7759a8230671b8ce63ebf0d9b75d3416add
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/__init__.py
@@ -0,0 +1,52 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module while still preserving the other warnings, so don't check this module at all.
+
+# coding=utf-8
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# from .config import PeftConfig, PeftType, PromptLearningConfig, TaskType
+from .loftq_utils import replace_lora_weights_loftq
+from .peft_types import PeftType, TaskType
+from .other import (
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
+ TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
+ TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
+ TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
+ CONFIG_NAME,
+ WEIGHTS_NAME,
+ SAFETENSORS_WEIGHTS_NAME,
+ INCLUDE_LINEAR_LAYERS_SHORTHAND,
+ _set_trainable,
+ bloom_model_postprocess_past_key_value,
+ prepare_model_for_kbit_training,
+ shift_tokens_right,
+ transpose,
+ _get_batch_size,
+ _get_submodules,
+ _set_adapter,
+ _freeze_adapter,
+ ModulesToSaveWrapper,
+ _prepare_prompt_learning_config,
+ _is_valid_match,
+ infer_device,
+ get_auto_gptq_quant_linear,
+ get_quantization_config,
+ id_tensor_storage,
+ cast_mixed_precision_params,
+)
+from .save_and_load import get_peft_model_state_dict, set_peft_model_state_dict, load_peft_weights
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79497cad97bf537a735842f41d473273bc7c4e13
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..747c329f65ca3b9258019d8dc466d73b0e75b996
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/constants.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/integrations.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/integrations.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfba9daf86682b6eb209c2a46bcc3fc46ab55134
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/integrations.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/loftq_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/loftq_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2fcb0a0303cd5fe8b0866ba302f9b610e35d6f9a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/loftq_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/merge_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/merge_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66fad9f82ca5c9ca6bc563d5f7bc6aab573672f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/merge_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/other.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/other.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32dd7b0c184344f83b4ee33ce4fd7854ce79d52b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/other.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/peft_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/peft_types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..529df7b27bbb1aecd20993f5021602c97912193d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/peft_types.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/__pycache__/save_and_load.cpython-310.pyc b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/save_and_load.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..efd4c1f1074fb754db482c04c2c349dde771b619
Binary files /dev/null and b/venv/lib/python3.10/site-packages/peft/utils/__pycache__/save_and_load.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/peft/utils/constants.py b/venv/lib/python3.10/site-packages/peft/utils/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..41047431cef3c18c603b4a331b791cbf00f4cdf2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/constants.py
@@ -0,0 +1,158 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+
+
+# needed for prefix-tuning of bloom model
+def bloom_model_postprocess_past_key_value(past_key_values):
+ past_key_values = torch.cat(past_key_values)
+ total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape
+ keys = past_key_values[: total_layers // 2]
+ keys = keys.transpose(2, 3).reshape(
+ total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens
+ )
+ values = past_key_values[total_layers // 2 :]
+ values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim)
+
+ return tuple(zip(keys, values))
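+
+
+# Hedged shape sketch (not part of the upstream peft module): illustrates the layout
+# produced by bloom_model_postprocess_past_key_value above, using hypothetical sizes.
+def _example_bloom_postprocess_shapes():
+    layers, batch, heads, n_virtual, head_dim = 2, 3, 16, 20, 64
+    # one stacked (key, value) tensor of shape (2, batch, heads, n_virtual, head_dim) per layer
+    past = tuple(torch.zeros(2, batch, heads, n_virtual, head_dim) for _ in range(layers))
+    out = bloom_model_postprocess_past_key_value(past)
+    # 2 layers; keys: (batch*heads, head_dim, n_virtual), values: (batch*heads, n_virtual, head_dim)
+    return len(out), out[0][0].shape, out[0][1].shape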
+
+
+# needed for prefix-tuning of StarCoder models
+def starcoder_model_postprocess_past_key_value(past_key_values):
+ result = []
+ for k in past_key_values:
+ k = k[:, :, 0]
+ k = k.permute([1, 2, 0, 3])
+ k = k.reshape(*k.shape[:-2], -1)
+ result.append(k)
+ return tuple(result)
+
+
+TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {
+ "bloom": bloom_model_postprocess_past_key_value,
+ "gpt_bigcode": starcoder_model_postprocess_past_key_value,
+}
+
+
+TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = {
+ "t5": ["q", "v"],
+ "mt5": ["q", "v"],
+ "bart": ["q_proj", "v_proj"],
+ "gpt2": ["c_attn"],
+ "bloom": ["query_key_value"],
+ "blip-2": ["q", "v", "q_proj", "v_proj"],
+ "opt": ["q_proj", "v_proj"],
+ "gptj": ["q_proj", "v_proj"],
+ "gpt_neox": ["query_key_value"],
+ "gpt_neo": ["q_proj", "v_proj"],
+ "bert": ["query", "value"],
+ "roberta": ["query", "value"],
+ "xlm-roberta": ["query", "value"],
+ "electra": ["query", "value"],
+ "deberta-v2": ["query_proj", "value_proj"],
+ "deberta": ["in_proj"],
+ "layoutlm": ["query", "value"],
+ "llama": ["q_proj", "v_proj"],
+ "chatglm": ["query_key_value"],
+ "gpt_bigcode": ["c_attn"],
+ "mpt": ["Wqkv"],
+ "RefinedWebModel": ["query_key_value"],
+ "RefinedWeb": ["query_key_value"],
+ "falcon": ["query_key_value"],
+ "btlm": ["c_proj", "c_attn"],
+ "codegen": ["qkv_proj"],
+ "mistral": ["q_proj", "v_proj"],
+ "mixtral": ["q_proj", "v_proj"],
+ "stablelm": ["q_proj", "v_proj"],
+ "phi": ["q_proj", "v_proj", "fc1", "fc2"],
+ "gemma": ["q_proj", "v_proj"],
+}
+
+TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING = {
+ "t5": ["k", "v", "wo"],
+ "mt5": ["k", "v", "wi_1"],
+ "gpt2": ["c_attn", "mlp.c_proj"],
+ "bloom": ["query_key_value", "mlp.dense_4h_to_h"],
+ "roberta": ["key", "value", "output.dense"],
+ "opt": ["q_proj", "k_proj", "fc2"],
+ "gptj": ["q_proj", "v_proj", "fc_out"],
+ "gpt_neox": ["query_key_value", "dense_4h_to_h"],
+ "gpt_neo": ["q_proj", "v_proj", "c_proj"],
+ "bart": ["q_proj", "v_proj", "fc2"],
+ "gpt_bigcode": ["c_attn", "mlp.c_proj"],
+ "llama": ["k_proj", "v_proj", "down_proj"],
+ "mistral": ["k_proj", "v_proj", "down_proj"],
+ "mixtral": ["k_proj", "v_proj", "w2"],
+ "bert": ["key", "value", "output.dense"],
+ "deberta-v2": ["key_proj", "value_proj", "output.dense"],
+ "deberta": ["in_proj", "output.dense"],
+ "RefinedWebModel": ["query_key_value", "dense_4h_to_h"],
+ "RefinedWeb": ["query_key_value", "dense_4h_to_h"],
+ "falcon": ["query_key_value", "dense_4h_to_h"],
+ "phi": ["q_proj", "v_proj", "fc2"],
+ "gemma": ["q_proj", "v_proj", "down_proj"],
+}
+
+TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING = {
+ "t5": ["wo"],
+ "mt5": [],
+ "gpt2": ["mlp.c_proj"],
+ "bloom": ["mlp.dense_4h_to_h"],
+ "roberta": ["output.dense"],
+ "opt": ["fc2"],
+ "gptj": ["fc_out"],
+ "gpt_neox": ["dense_4h_to_h"],
+ "gpt_neo": ["c_proj"],
+ "bart": ["fc2"],
+ "gpt_bigcode": ["mlp.c_proj"],
+ "llama": ["down_proj"],
+ "mistral": ["down_proj"],
+ "mixtral": ["w2"],
+ "bert": ["output.dense"],
+ "deberta-v2": ["output.dense"],
+ "deberta": ["output.dense"],
+ "RefinedWeb": ["dense_4h_to_h"],
+ "RefinedWebModel": ["dense_4h_to_h"],
+ "falcon": ["dense_4h_to_h"],
+ "phi": ["fc2"],
+ "gemma": ["down_proj"],
+}
+
+TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = {
+ "t5": ["q", "k", "v", "o", "wi", "wo"],
+ "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"],
+ "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"],
+ "gpt2": ["c_attn"],
+ "bloom": ["query_key_value"],
+ "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"],
+ "gptj": ["q_proj", "v_proj"],
+ "gpt_neox": ["query_key_value"],
+ "gpt_neo": ["q_proj", "v_proj"],
+ "llama": ["q_proj", "v_proj"],
+ "bert": ["query", "value"],
+ "roberta": ["query", "key", "value", "dense"],
+ # "xlm-roberta": ["query", "value"],
+ # "electra": ["query", "value"],
+ "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"],
+ "gpt_bigcode": ["c_attn"],
+ "deberta": ["in_proj"],
+ # "layoutlm": ["query", "value"],
+}
+
+WEIGHTS_NAME = "adapter_model.bin"
+SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors"
+CONFIG_NAME = "adapter_config.json"
+EMBEDDING_LAYER_NAMES = ["embed_tokens", "lm_head"]
+INCLUDE_LINEAR_LAYERS_SHORTHAND = "all-linear"
+TOKENIZER_CONFIG_NAME = "tokenizer_config.json"
diff --git a/venv/lib/python3.10/site-packages/peft/utils/integrations.py b/venv/lib/python3.10/site-packages/peft/utils/integrations.py
new file mode 100644
index 0000000000000000000000000000000000000000..76693701de2f81fdf7535b55d9b7cafbcb75df17
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/integrations.py
@@ -0,0 +1,69 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from contextlib import contextmanager
+
+import packaging.version
+import torch
+import transformers
+
+
+@contextmanager
+def gather_params_ctx(module: torch.nn.Module, modifier_rank: int = 0):
+ """Call DeepSpeed GatheredParameters context manager if DeepSpeed is enabled, otherwise do nothing."""
+ if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.33.0"):
+ from transformers.integrations import is_deepspeed_zero3_enabled
+ else:
+ from transformers.deepspeed import is_deepspeed_zero3_enabled
+
+ if not is_deepspeed_zero3_enabled():
+ yield
+ return
+
+ import deepspeed
+
+ params_to_gather = module.parameters()
+ with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=modifier_rank):
+ yield
+ return
+
+
+def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None):
+ """
+ Helper function to dequantize 4bit or 8bit bnb weights.
+
+ If the weight is not a bnb quantized weight, it will be returned as is.
+ """
+ if not isinstance(weight, torch.nn.Parameter):
+ raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead")
+
+ cls_name = weight.__class__.__name__
+ if cls_name not in ("Params4bit", "Int8Params"):
+ return weight
+
+ import bitsandbytes as bnb
+
+ if cls_name == "Params4bit":
+ return bnb.functional.dequantize_4bit(weight.data, weight.quant_state)
+
+ if state.SCB is None:
+ state.SCB = weight.SCB
+
+ im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device)
+ im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im)
+ im, Sim = bnb.functional.transform(im, "col32")
+ if state.CxB is None:
+ state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB)
+ out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB)
+ return bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t()
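+
+
+# --- Illustrative usage sketch (not part of the upstream peft module) ---
+# gather_params_ctx is a no-op outside of DeepSpeed ZeRO-3, so callers can wrap parameter
+# access unconditionally; `linear` below is a hypothetical module supplied by the caller.
+def _example_read_full_weight(linear: torch.nn.Linear) -> torch.Tensor:
+    with gather_params_ctx(linear):
+        # inside the context the (possibly ZeRO-3 partitioned) parameters are fully materialized
+        return linear.weight.detach().clone()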
diff --git a/venv/lib/python3.10/site-packages/peft/utils/loftq_utils.py b/venv/lib/python3.10/site-packages/peft/utils/loftq_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..20bbe20adacef45f8e50cd1a4f09200ad741b997
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/loftq_utils.py
@@ -0,0 +1,407 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Reference code: https://github.com/yxli2123/LoftQ/blob/main/utils.py
+# Reference paper: https://arxiv.org/abs/2310.08659
+
+from __future__ import annotations
+
+import logging
+import os
+from typing import Callable, Optional, Union
+
+import torch
+from huggingface_hub import snapshot_download
+from huggingface_hub.utils import LocalEntryNotFoundError
+from safetensors import SafetensorError, safe_open
+from transformers.utils import cached_file
+from transformers.utils.hub import get_checkpoint_shard_files
+
+from peft.import_utils import is_bnb_4bit_available, is_bnb_available
+
+
+if is_bnb_available():
+ import bitsandbytes as bnb
+
+
+class NFQuantizer:
+ def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.num_bits = num_bits
+ self.device = device
+ self.method = method
+ self.block_size = block_size
+ if self.method == "normal":
+ self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits)
+ self.norm_lookup_table = self.norm_lookup_table.to(device)
+ elif self.method == "uniform":
+ self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits)
+ self.norm_lookup_table = self.norm_lookup_table.to(device)
+ else:
+ raise NotImplementedError("Other quantization methods not supported yet.")
+
+ @staticmethod
+ def create_uniform_map(symmetric=False, num_bits=4):
+ if symmetric:
+ # print("symmetric uniform quantization")
+ negative = torch.linspace(-1, 0, 2 ** (num_bits - 1))
+ positive = torch.linspace(0, 1, 2 ** (num_bits - 1))
+ table = torch.cat([negative, positive[1:]])
+ else:
+ # print("asymmetric uniform quantization")
+ table = torch.linspace(-1, 1, 2**num_bits)
+ return table
+
+ @staticmethod
+ def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2):
+ try:
+ from scipy.stats import norm
+ except ImportError:
+ raise ImportError("The required package 'scipy' is not installed. Please install it to continue.")
+
+ variations = 2**num_bits
+ if symmetric:
+ v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist()
+ values = []
+ for index in range(len(v) - 1):
+ values.append(0.5 * v[index] + 0.5 * v[index + 1])
+ v = values
+ else:
+ # one more positive value, this is an asymmetric type
+ v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist()
+ v2 = [0]
+ v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist()
+ v = v1 + v2 + v3
+
+ values = torch.Tensor(v)
+ values = values.sort().values
+ values /= values.max()
+ return values
+
+ def quantize_tensor(self, weight):
+ max_abs = torch.abs(weight).max()
+ weight_normed = weight / max_abs
+
+ weight_normed_expanded = weight_normed.unsqueeze(-1)
+
+ # Reshape L to have the same number of dimensions as X_expanded
+ L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1)
+
+ # Calculate the absolute difference between X_expanded and L_reshaped
+ abs_diff = torch.abs(weight_normed_expanded - L_reshaped)
+
+ # Find the index of the minimum absolute difference for each element
+ qweight = torch.argmin(abs_diff, dim=-1)
+ return qweight, max_abs
+
+ def dequantize_tensor(self, qweight, max_abs):
+ qweight_flatten = qweight.flatten()
+
+ weight_normed = self.norm_lookup_table[qweight_flatten]
+ weight = weight_normed * max_abs
+
+ weight = weight.reshape(qweight.shape)
+
+ return weight
+
+ def quantize_block(self, weight):
+ if len(weight.shape) != 2:
+ raise ValueError(f"Only support 2D matrix, but your input has {len(weight.shape)} dimensions.")
+ if weight.shape[0] * weight.shape[1] % self.block_size != 0:
+ raise ValueError(
+ f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) "
+                f"is not divisible by block size {self.block_size}."
+ )
+
+ M, N = weight.shape
+ device = weight.device
+
+ # Quantization
+ weight_flatten = weight.flatten() # (M*N, )
+ weight_block = weight_flatten.reshape(-1, self.block_size) # (L, B), L = M * N / B
+ if self.method == "normal":
+ weight_max = weight_block.abs().max(dim=-1)[0] # (L, 1)
+ elif self.method == "uniform":
+ weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1)
+ else:
+ raise NotImplementedError("Method not supported yet.")
+ weight_max = weight_max.unsqueeze(-1)
+ weight_divabs = weight_block / weight_max # (L, B)
+ weight_divabs = weight_divabs.unsqueeze(-1) # (L, B, 1)
+ L_reshaped = self.norm_lookup_table.reshape(1, -1) # (1, 2**K)
+
+ abs_diff = torch.abs(weight_divabs - L_reshaped) # (L, B, 2**K)
+ qweight = torch.argmin(abs_diff, dim=-1) # (L, B)
+
+ # Pack multiple k-bit into uint8
+ qweight = qweight.reshape(-1, 8 // self.num_bits)
+ qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device)
+
+ # data format example:
+ # [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO
+ for i in range(8 // self.num_bits):
+ qweight[:, i] = qweight[:, i] << i * self.num_bits
+ qweight_pack[:, 0] |= qweight[:, i]
+
+ return qweight_pack, weight_max, weight.shape
+
+ def dequantize_block(self, qweight, weight_max, weight_shape):
+ # unpack weight
+ device = qweight.device
+ weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device)
+ for i in range(8 // self.num_bits):
+            lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits  # keep only the rightmost num_bits bits
+ lookup_table_idx = lookup_table_idx.to(torch.long)
+ weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze()
+            qweight = qweight >> self.num_bits  # shift right to expose the next num_bits bits
+
+ weight_block = weight.reshape(-1, self.block_size)
+ weight = weight_block * weight_max
+ weight = weight.reshape(weight_shape)
+
+ return weight
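+
+
+# Hedged round-trip sketch (not part of the upstream peft module): block-quantize a small
+# matrix with the NFQuantizer above and check the reconstruction shape. The sizes are
+# hypothetical, the device is CPU, and the "normal" method assumes scipy is installed.
+def _example_nf_quantize_roundtrip():
+    quantizer = NFQuantizer(num_bits=2, device="cpu", method="normal", block_size=64)
+    weight = torch.randn(16, 32)  # 16 * 32 = 512 elements, divisible by block_size=64
+    qweight, weight_max, shape = quantizer.quantize_block(weight)
+    restored = quantizer.dequantize_block(qweight, weight_max, shape)
+    return restored.shape  # torch.Size([16, 32])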
+
+
+def _low_rank_decomposition(weight, reduced_rank=32):
+    """
+    :param weight: the matrix to decompose, of shape (H, W)
+    :param reduced_rank: the final rank
+    :return: a dict with the low-rank factors L (H, reduced_rank) and R (reduced_rank, W),
+        the full SVD factors U, S, Vh, and the reduced_rank used
+    """
+ matrix_dimension = len(weight.size())
+ if matrix_dimension != 2:
+ raise ValueError(f"Only support 2D matrix, but your input has {matrix_dimension} dimensions.")
+
+ # Use SVD to decompose a matrix, default full_matrices is False to save parameters
+ U, S, Vh = torch.linalg.svd(weight, full_matrices=False)
+
+ L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank]))
+ R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh
+
+ return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank}
+
+
+@torch.no_grad()
+def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1):
+ if num_bits not in [2, 4, 8]:
+ raise ValueError("Only support 2, 4, 8 bits quantization")
+ if num_iter <= 0:
+ raise ValueError("Number of iterations must be greater than 0")
+
+ out_feature, in_feature = weight.size()
+ device = weight.device
+ dtype = weight.dtype
+
+ logging.info(
+ f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} "
+ f"| Num Iter: {num_iter} | Num Bits: {num_bits}"
+ )
+ if not is_bnb_4bit_available() or num_bits in [2, 8]:
+ quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)
+ compute_device = device
+ else:
+ compute_device = "cuda"
+
+ weight = weight.to(device=compute_device, dtype=torch.float32)
+ res = weight.clone()
+ for i in range(num_iter):
+ torch.cuda.empty_cache()
+ # Quantization
+ if num_bits == 4 and is_bnb_4bit_available():
+ qweight = bnb.nn.Params4bit(
+ res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4"
+ ).to(compute_device)
+ dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
+ else:
+ quantized_weight, max_abs, shape = quantizer.quantize_block(res)
+ dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape)
+
+ res = weight - dequantized_weight
+
+ # Decompose the residual by SVD
+ output = _low_rank_decomposition(res, reduced_rank=reduced_rank)
+ L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
+ res = weight - torch.mm(L, R)
+
+ lora_A, lora_B = R, L
+
+ return dequantized_weight.to(device=device, dtype=dtype), lora_A, lora_B
+
+
+@torch.no_grad()
+def _loftq_init_new(qweight, weight, num_bits: int, reduced_rank: int):
+ if num_bits != 4:
+ raise ValueError("Only 4 bit quantization supported at the moment.")
+ if not is_bnb_4bit_available():
+ raise ValueError("bitsandbytes 4bit quantization is not available.")
+
+ compute_device = "cuda"
+ dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
+
+ weight = weight.to(device=compute_device, dtype=torch.float32)
+ residual = weight - dequantized_weight
+ torch.cuda.empty_cache()
+    # Decompose the residual by SVD
+ output = _low_rank_decomposition(residual, reduced_rank=reduced_rank)
+ L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
+ return R, L
+
+
+class _SafetensorLoader:
+ """
+ Simple utility class that loads tensors with safetensors from a single file or sharded files.
+
+ Takes care of file name normalization etc.
+
+ """
+
+ def __init__(self, peft_model, model_path):
+ if model_path is None:
+ try:
+ model_path = snapshot_download(peft_model.base_model.config._name_or_path, local_files_only=True)
+ except AttributeError as exc:
+ raise ValueError(
+ "The provided model does not appear to be a transformers model. In this case, you must pass the "
+ "model_path to the safetensors file."
+ ) from exc
+ except LocalEntryNotFoundError as exc:
+ raise ValueError(
+ "The model.safetensors file must be present on disk, but it could not be found."
+ ) from exc
+
+ suffix = "model.safetensors"
+ if not model_path.endswith(suffix):
+ model_path = os.path.join(model_path, suffix)
+
+ self.model_path = model_path
+ self.base_model_prefix = getattr(peft_model.get_base_model(), "base_model_prefix", None)
+ self.prefix = "base_model.model."
+ self.is_sharded = False
+ self.weight_map = None
+
+ if not os.path.exists(model_path):
+ # check if the file is sharded
+ par_dir = model_path.rpartition(os.path.sep)[0]
+ try:
+ resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
+ par_dir, cached_file(par_dir, "model.safetensors.index.json")
+ )
+ except OSError as exc:
+ raise FileNotFoundError(
+ f"Could not find file for {model_path}, ensure that there is a (sharded) safetensors file of the model."
+ ) from exc
+
+ self.is_sharded = True
+ # maps from 'model-X-of-Y.safetensors' to full file path
+ file_map = {k.rpartition(os.path.sep)[-1]: k for k in resolved_archive_file}
+ self.weight_map = {k: file_map[v] for k, v in sharded_metadata["weight_map"].items()}
+
+ def get_tensor(self, name):
+ if not self.is_sharded:
+ file_path = self.model_path
+ else:
+ file_path = self.weight_map[name]
+
+ with safe_open(file_path, framework="pt", device="cpu") as f:
+ try:
+ tensor = f.get_tensor(name)
+ except SafetensorError as exc:
+ # no matching key found, we probably need to remove the base model prefix
+ if self.base_model_prefix:
+ # remove 1 extra character for "."
+ name = name[len(self.base_model_prefix) + 1 :]
+ tensor = f.get_tensor(name)
+ else:
+ raise exc
+ return tensor
+
+
+@torch.no_grad()
+def replace_lora_weights_loftq(
+ peft_model,
+ model_path: Optional[str] = None,
+ adapter_name: str = "default",
+ callback: Optional[Callable[[torch.nn.Module, str], bool]] = None,
+):
+ """
+ Replace the LoRA weights of a model quantized with bitsandbytes, using the LoftQ technique.
+
+ The replacement is done on the fly by loading in the non-quantized weights from a locally stored safetensors model
+ file and initializing the LoRA weights such that the quantization error between the original and quantized weights
+ is minimized.
+
+ As lazy loading is not possible with pickle, normal PyTorch checkpoint files cannot be supported.
+
+ Depending on the model size, calling this function may take some time to finish.
+
+ Args:
+ peft_model (`PeftModel`):
+ The model to replace the weights of. Must be a quantized PEFT model with LoRA layers.
+ model_path (`Optional[str]`):
+ The path to the model safetensors file. If the model is a Hugging Face model, this will be inferred from
+ the model's config. Otherwise, it must be provided.
+ adapter_name (`str`):
+ The name of the adapter to replace the weights of. The default adapter name is "default".
+ callback (`Optional[Callable[[PeftModel, str], bool]]`):
+ A callback function that will be called after each module is replaced. The callback function should take
+ the model and the name of the current module as input and return a boolean indicating whether the
+ replacement should be kept. If the callback returns False, the replacement will be rolled back. This can be
+ very useful to confirm that the LoftQ initialization actually decreases the quantization error of the
+ model. As an example, this callback could generate logits for a given input and compare them with the logits
+ from the original, non-quantized model with the same input, and only return `True` if there is an improvement
+ (a commented sketch of such a callback follows this function). As this is a greedy optimization, it's possible
+ that calling this function multiple times yields incremental improvements.
+ """
+ if not is_bnb_4bit_available():
+ raise ValueError("bitsandbytes must be installed and the model must be quantized in 4bits.")
+
+ from peft.tuners.lora import Linear4bit
+
+ # model_path = _check_model_path_loftq(model_path, peft_model)
+ prefix = "base_model.model."
+ any_match = False
+ safetensor_loader = _SafetensorLoader(peft_model, model_path)
+
+ # if too slow, consider adding tqdm as an option
+ for name, module in peft_model.named_modules():
+ if not isinstance(module, Linear4bit):
+ continue
+
+ if not name.startswith(prefix):
+ raise TypeError("The passed model does not appear to be a valid PeftModel")
+
+ any_match = True
+ name = name[len(prefix) :]
+ tensor = safetensor_loader.get_tensor(name + ".weight")
+
+ reduced_rank = module.r[adapter_name]
+ lora_A, lora_B = _loftq_init_new(module.weight, tensor, num_bits=4, reduced_rank=reduced_rank)
+ if not callback:
+ module.lora_A[adapter_name].weight.data = lora_A
+ module.lora_B[adapter_name].weight.data = lora_B
+ continue
+
+ lora_A_before = module.lora_A[adapter_name].weight.data
+ lora_B_before = module.lora_B[adapter_name].weight.data
+
+ module.lora_A[adapter_name].weight.data = lora_A
+ module.lora_B[adapter_name].weight.data = lora_B
+ should_replace = callback(peft_model, name)
+ if not should_replace:
+ # roll back
+ module.lora_A[adapter_name].weight.data = lora_A_before
+ module.lora_B[adapter_name].weight.data = lora_B_before
+
+ del lora_A_before, lora_B_before
+
+ if not any_match:
+ raise ValueError("No bnb LoRA module found on the model")
diff --git a/venv/lib/python3.10/site-packages/peft/utils/merge_utils.py b/venv/lib/python3.10/site-packages/peft/utils/merge_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..900cb878e2d785e5e800caa58642f0532f4d574b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/merge_utils.py
@@ -0,0 +1,268 @@
+# Copyright 2024-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+from typing import List, Literal
+
+import torch
+
+
+def reshape_weight_task_tensors(task_tensors, weights):
+ """
+ Reshapes `weights` to match the shape of `task_tensors` by unsqueezing in the remaining dimensions.
+
+ Args:
+ task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`.
+ weights (`torch.Tensor`): The tensor to be reshaped.
+
+ Returns:
+ `torch.Tensor`: The reshaped tensor.
+ """
+ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim())
+ weights = weights.view(new_shape)
+ return weights
+
+
+def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor:
+ """
+ Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction
+ `density`.
+
+ Args:
+ tensor (`torch.Tensor`):The tensor to prune.
+ density (`float`):The fraction of values to preserve. Should be in [0,1].
+
+ Returns:
+ `torch.Tensor`: The tensor with the pruned weights.
+ """
+ mask = torch.zeros_like(tensor).reshape(-1)
+ k = int(density * tensor.numel())
+ top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True)
+ mask[top_k[1]] = 1
+ return tensor * mask.reshape(tensor.shape)
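+
+# Toy example (illustrative values only): with density=0.5 the two largest-magnitude
+# entries are kept and the rest are zeroed out.
+#
+#     t = torch.tensor([0.1, -2.0, 0.3, 4.0])
+#     magnitude_based_pruning(t, density=0.5)  # -> tensor([0., -2., 0., 4.])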
+
+
+def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor:
+ """
+ Prune random values based on the specified fraction `density`.
+
+ Args:
+ tensor (`torch.Tensor`):The tensor to prune.
+ density (`float`):The fraction of values to preserve. Should be in [0,1].
+ rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.
+
+ Returns:
+ `torch.Tensor`: The pruned tensor.
+ """
+ mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density))
+ pruned_tensor = tensor * mask
+ if rescale:
+ pruned_tensor = torch.div(input=pruned_tensor, other=density)
+ return pruned_tensor
+
+
+def prune(
+ tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False
+) -> torch.Tensor:
+ """
+ Prune the values of task tensors based on the `method`.
+
+ Args:
+ tensor (`torch.Tensor`):The tensor to prune.
+ density (`float`):The fraction of values to preserve. Should be in [0,1].
+ method (`str`):The method to use to prune. Should be one of ["magnitude", "random"].
+ rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.
+
+ Returns:
+ `torch.Tensor`: The pruned tensor.
+ """
+ if density >= 1:
+ warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.")
+ return tensor
+ elif density < 0:
+ raise ValueError(f"Density should be >= 0, got {density}")
+ if method == "magnitude":
+ return magnitude_based_pruning(tensor, density)
+ elif method == "random":
+ return random_pruning(tensor, density, rescale=rescale)
+ else:
+ raise ValueError(f"Unknown method {method}")
+
+
+def calculate_majority_sign_mask(
+ tensor: torch.Tensor, method: Literal["total", "frequency"] = "total"
+) -> torch.Tensor:
+ """
+ Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0.
+
+ Args:
+ tensor (`torch.Tensor`):The tensor to get the mask from.
+ method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"].
+
+ Returns:
+ `torch.Tensor`: The majority sign mask.
+ """
+
+ sign = tensor.sign()
+ if method == "total":
+ sign_magnitude = tensor.sum(dim=0)
+ elif method == "frequency":
+ sign_magnitude = sign.sum(dim=0)
+ else:
+ raise RuntimeError(f'Unimplemented mask method "{method}"')
+ majority_sign = torch.where(sign_magnitude >= 0, 1, -1)
+ return sign == majority_sign
+
+
+def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor:
+ """
+ Merge the task tensors using disjoint merge.
+
+ Args:
+ task_tensors (`torch.Tensor`):The task tensors to merge.
+ majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors.
+
+ Returns:
+ `torch.Tensor`: The merged tensor.
+ """
+ mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0)
+ num_params_preserved = majority_sign_mask.sum(dim=0)
+ return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0)
+
+
+def task_arithmetic(task_tensors: List[torch.Tensor], weights: torch.Tensor) -> torch.Tensor:
+ """
+ Merge the task tensors using `task arithmetic`.
+
+ Args:
+ task_tensors(`List[torch.Tensor]`):The task tensors to merge.
+ weights (`torch.Tensor`):The weights of the task tensors.
+
+ Returns:
+ `torch.Tensor`: The merged tensor.
+ """
+ task_tensors = torch.stack(task_tensors, dim=0)
+ # weighted task tensors
+ weights = reshape_weight_task_tensors(task_tensors, weights)
+ weighted_task_tensors = task_tensors * weights
+ mixed_task_tensors = weighted_task_tensors.sum(dim=0)
+ return mixed_task_tensors
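+
+# Toy example (illustrative values only): an equally weighted sum of two task vectors.
+#
+#     deltas = [torch.tensor([1.0, 2.0, 3.0]), torch.tensor([3.0, 0.0, -1.0])]
+#     task_arithmetic(deltas, weights=torch.tensor([0.5, 0.5]))
+#     # -> tensor([2.0, 1.0, 1.0])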
+
+
+def magnitude_prune(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor:
+ """
+ Prune the task tensors by magnitude and merge them using `task arithmetic`.
+
+ Args:
+ task_tensors(`List[torch.Tensor]`):The task tensors to merge.
+ weights (`torch.Tensor`):The weights of the task tensors.
+ density (`float`): The fraction of values to preserve. Should be in [0,1].
+
+ Returns:
+ `torch.Tensor`: The merged tensor.
+ """
+ # sparsify
+ task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors]
+ task_tensors = torch.stack(task_tensors, dim=0)
+ # weighted task tensors
+ weights = reshape_weight_task_tensors(task_tensors, weights)
+ weighted_task_tensors = task_tensors * weights
+ mixed_task_tensors = weighted_task_tensors.sum(dim=0)
+ return mixed_task_tensors
+
+
+def ties(
+ task_tensors: List[torch.Tensor],
+ weights: torch.Tensor,
+ density: float,
+ majority_sign_method: Literal["total", "frequency"] = "total",
+) -> torch.Tensor:
+ """
+ Merge the task tensors using `ties`.
+
+ Args:
+ task_tensors(`List[torch.Tensor]`):The task tensors to merge.
+ weights (`torch.Tensor`):The weights of the task tensors.
+ density (`float`):The fraction of values to preserve. Should be in [0,1].
+ majority_sign_method (`str`):
+ The method to use to get the majority sign mask. Should be one of ["total", "frequency"].
+
+ Returns:
+ `torch.Tensor`: The merged tensor.
+ """
+ # sparsify
+ task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors]
+ task_tensors = torch.stack(task_tensors, dim=0)
+ # Elect Sign
+ majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method)
+ # weighted task tensors
+ weights = reshape_weight_task_tensors(task_tensors, weights)
+ weighted_task_tensors = task_tensors * weights
+ # Disjoint Merge
+ mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask)
+ return mixed_task_tensors
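+
+# Hypothetical usage sketch: prune each task vector to the top 70% of values by magnitude,
+# elect the majority sign per element, then average the sign-agreeing entries. `delta_a`,
+# `delta_b` and `delta_c` are placeholders for task vectors (fine-tuned minus base weights).
+#
+#     merged = ties(
+#         task_tensors=[delta_a, delta_b, delta_c],
+#         weights=torch.tensor([1.0, 1.0, 1.0]),
+#         density=0.7,
+#         majority_sign_method="total",
+#     )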
+
+
+def dare_linear(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor:
+ """
+ Merge the task tensors using `dare linear`.
+
+ Args:
+ task_tensors(`List[torch.Tensor]`):The task tensors to merge.
+ weights (`torch.Tensor`):The weights of the task tensors.
+ density (`float`):The fraction of values to preserve. Should be in [0,1].
+
+ Returns:
+ `torch.Tensor`: The merged tensor.
+ """
+ # sparsify
+ task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors]
+ task_tensors = torch.stack(task_tensors, dim=0)
+ # weighted task tensors
+ weights = reshape_weight_task_tensors(task_tensors, weights)
+ weighted_task_tensors = task_tensors * weights
+ mixed_task_tensors = weighted_task_tensors.sum(dim=0)
+ return mixed_task_tensors
+
+
+def dare_ties(
+ task_tensors: List[torch.Tensor],
+ weights: torch.Tensor,
+ density: float,
+ majority_sign_method: Literal["total", "frequency"] = "total",
+) -> torch.Tensor:
+ """
+ Merge the task tensors using `dare ties`.
+
+ Args:
+ task_tensors(`List[torch.Tensor]`):The task tensors to merge.
+ weights (`torch.Tensor`):The weights of the task tensors.
+ density (`float`):The fraction of values to preserve. Should be in [0,1].
+ majority_sign_method (`str`):
+ The method to use to get the majority sign mask. Should be one of ["total", "frequency"].
+
+ Returns:
+ `torch.Tensor`: The merged tensor.
+ """
+ # sparsify
+ task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors]
+ task_tensors = torch.stack(task_tensors, dim=0)
+ # Elect Sign
+ majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method)
+ # weighted task tensors
+ weights = reshape_weight_task_tensors(task_tensors, weights)
+ weighted_task_tensors = task_tensors * weights
+ # Disjoint Merge
+ mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask)
+ return mixed_task_tensors
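+
+# Hypothetical usage sketch: DARE keeps a random `density` fraction of each task vector
+# (rescaled by 1/density) before the TIES-style sign election and merge, so results vary
+# between calls unless a seed is fixed. `delta_a` and `delta_b` are placeholders.
+#
+#     torch.manual_seed(0)
+#     merged = dare_ties(
+#         task_tensors=[delta_a, delta_b],
+#         weights=torch.tensor([1.0, 1.0]),
+#         density=0.5,
+#     )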
diff --git a/venv/lib/python3.10/site-packages/peft/utils/other.py b/venv/lib/python3.10/site-packages/peft/utils/other.py
new file mode 100644
index 0000000000000000000000000000000000000000..05f8d6b12959a483e038ea264de3be075d9446db
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/other.py
@@ -0,0 +1,586 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import copy
+import inspect
+import os
+import warnings
+from contextlib import nullcontext
+from typing import Optional, Tuple
+
+import accelerate
+import torch
+from accelerate.hooks import add_hook_to_module, remove_hook_from_module
+from accelerate.utils import is_npu_available, is_xpu_available
+from huggingface_hub import file_exists
+from huggingface_hub.utils import EntryNotFoundError, HFValidationError
+from safetensors.torch import storage_ptr, storage_size
+
+from ..import_utils import is_auto_gptq_available, is_torch_tpu_available
+from .constants import (
+ CONFIG_NAME,
+ EMBEDDING_LAYER_NAMES,
+ INCLUDE_LINEAR_LAYERS_SHORTHAND,
+ SAFETENSORS_WEIGHTS_NAME,
+ TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
+ TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
+ TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
+ WEIGHTS_NAME,
+ bloom_model_postprocess_past_key_value,
+ starcoder_model_postprocess_past_key_value,
+)
+
+
+__all__ = [
+ "CONFIG_NAME",
+ "EMBEDDING_LAYER_NAMES",
+ "SAFETENSORS_WEIGHTS_NAME",
+ "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING",
+ "TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING",
+ "TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING",
+ "TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING",
+ "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING",
+ "WEIGHTS_NAME",
+ "INCLUDE_LINEAR_LAYERS_SHORTHAND",
+ "bloom_model_postprocess_past_key_value",
+ "starcoder_model_postprocess_past_key_value",
+]
+
+
+# Get current device name based on available devices
+def infer_device() -> str:
+ if torch.cuda.is_available():
+ return "cuda"
+ elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+ return "mps"
+ elif is_xpu_available():
+ return "xpu"
+ elif is_npu_available():
+ return "npu"
+ return "cpu"
+
+
+def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True, gradient_checkpointing_kwargs=None):
+ r"""
+ Note this method only works for `transformers` models.
+
+ This method wraps the entire protocol for preparing a model before running training. This includes:
+ 1- casting the layernorm to fp32, 2- making the output embedding layer require gradients, 3- upcasting the
+ language model head to fp32.
+
+ Args:
+ model (`transformers.PreTrainedModel`):
+ The loaded model from `transformers`
+ use_gradient_checkpointing (`bool`, *optional*, defaults to `True`):
+ If True, use gradient checkpointing to save memory at the expense of a slower backward pass.
+ gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`):
+ Keyword arguments to pass to the gradient checkpointing function, please refer to the documentation of
+ `torch.utils.checkpoint.checkpoint` for more details about the arguments that you can pass to that method.
+ Note this is only available in the latest transformers versions (> 4.34.1).
+ """
+ loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)
+ is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq"
+ is_aqlm_quantized = getattr(model, "quantization_method", None) == "aqlm"
+ if gradient_checkpointing_kwargs is None:
+ gradient_checkpointing_kwargs = {}
+
+ for name, param in model.named_parameters():
+ # freeze base model's layers
+ param.requires_grad = False
+
+ if not is_gptq_quantized and not is_aqlm_quantized:
+ # cast all non INT8 parameters to fp32
+ for param in model.parameters():
+ if (
+ (param.dtype == torch.float16) or (param.dtype == torch.bfloat16)
+ ) and param.__class__.__name__ != "Params4bit":
+ param.data = param.data.to(torch.float32)
+
+ if (loaded_in_kbit or is_gptq_quantized or is_aqlm_quantized) and use_gradient_checkpointing:
+ # When having `use_reentrant=False` + gradient_checkpointing, there is no need for this hack
+ if "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]:
+ # For backward compatibility
+ if hasattr(model, "enable_input_require_grads"):
+ model.enable_input_require_grads()
+ else:
+
+ def make_inputs_require_grad(module, input, output):
+ output.requires_grad_(True)
+
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+ # To support older transformers versions, check if the model supports gradient_checkpointing_kwargs
+ _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list(
+ inspect.signature(model.gradient_checkpointing_enable).parameters
+ )
+
+ if not _supports_gc_kwargs and len(gradient_checkpointing_kwargs) > 0:
+ warnings.warn(
+ "gradient_checkpointing_kwargs is not supported in this version of transformers. The passed kwargs will be ignored."
+ " if you want to use that feature, please upgrade to the latest version of transformers.",
+ FutureWarning,
+ )
+
+ gc_enable_kwargs = (
+ {} if not _supports_gc_kwargs else {"gradient_checkpointing_kwargs": gradient_checkpointing_kwargs}
+ )
+
+ # enable gradient checkpointing for memory efficiency
+ model.gradient_checkpointing_enable(**gc_enable_kwargs)
+ return model
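+
+# Hypothetical usage sketch (model name is a placeholder): load a 4-bit quantized
+# transformers model and prepare it for k-bit training with non-reentrant gradient
+# checkpointing.
+#
+#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+#
+#     base = AutoModelForCausalLM.from_pretrained(
+#         "some-org/some-causal-lm",
+#         quantization_config=BitsAndBytesConfig(load_in_4bit=True),
+#     )
+#     base = prepare_model_for_kbit_training(
+#         base,
+#         use_gradient_checkpointing=True,
+#         gradient_checkpointing_kwargs={"use_reentrant": False},
+#     )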
+
+
+# copied from transformers.models.bart.modeling_bart
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids
+ pad_token_id (`int`): The id of the `padding` token.
+ decoder_start_token_id (`int`): The id of the `start` token.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
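+
+# Toy example (illustrative ids only): the decoder start token is prepended and the last
+# token is dropped; any remaining -100 label values would be replaced by pad_token_id.
+#
+#     ids = torch.tensor([[5, 6, 7, -100]])
+#     shift_tokens_right(ids, pad_token_id=1, decoder_start_token_id=0)
+#     # -> tensor([[0, 5, 6, 7]])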
+
+
+class ModulesToSaveWrapper(torch.nn.Module):
+ def __init__(self, module_to_save, adapter_name):
+ super().__init__()
+ self.original_module = module_to_save
+ self.modules_to_save = torch.nn.ModuleDict({})
+ self._active_adapter = adapter_name
+ self._disable_adapters = False
+ self.update(adapter_name)
+ self.check_module()
+
+ def check_module(self):
+ """Perform some sanity checks on the module to ensure that it works"""
+ # Try to anticipate some modules that users could try to target that would not work.
+ # Note: It's not possible to check hasattr(module, "forward"), since that returns True for ModuleDict and
+ # ModuleList, even though their forward methods cannot be called
+ forbidden_classes = (torch.nn.ModuleDict, torch.nn.ModuleList, torch.nn.ParameterDict, torch.nn.ParameterList)
+ if isinstance(self.original_module, forbidden_classes):
+ cls_name = self.original_module.__class__.__name__
+ raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}")
+
+ @property
+ def disable_adapters(self) -> bool:
+ # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method
+ return self._disable_adapters
+
+ @property
+ def active_adapter(self) -> str:
+ # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method
+ return self._active_adapter
+
+ @property
+ def weight(self):
+ if self.active_adapter not in self.modules_to_save:
+ return self.original_module.weight
+ return self.modules_to_save[self.active_adapter].weight
+
+ def update(self, adapter_name):
+ context_manager = nullcontext()
+ for _, param in self.original_module.named_parameters():
+ num_params = param.numel()
+ # if using DS Zero 3 and the weights are initialized empty
+ if num_params == 0 and hasattr(param, "ds_numel"):
+ import deepspeed
+
+ context_manager = deepspeed.zero.GatheredParameters(self.original_module.parameters(), modifier_rank=0)
+ break
+ with context_manager:
+ self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)}))
+
+ if hasattr(self.modules_to_save[adapter_name], "_hf_hook"):
+ old_hook = self.modules_to_save[adapter_name]._hf_hook
+ new_hook = self._create_new_hook(old_hook)
+ remove_hook_from_module(self.modules_to_save[adapter_name])
+ add_hook_to_module(self.modules_to_save[adapter_name], new_hook)
+
+ self.original_module.requires_grad_(False)
+ if adapter_name == self.active_adapter:
+ self.modules_to_save[adapter_name].requires_grad_(True)
+
+ def _create_new_hook(self, old_hook):
+ r"""
+ Creates a new hook based on the old hook. Use it only if you know what you are doing!
+ """
+ old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__)
+ old_hook_attr = old_hook.__dict__
+ filtered_old_hook_attr = {}
+ old_hook_init_signature = inspect.signature(old_hook_cls.__init__)
+ for k in old_hook_attr.keys():
+ if k in old_hook_init_signature.parameters:
+ filtered_old_hook_attr[k] = old_hook_attr[k]
+ new_hook = old_hook_cls(**filtered_old_hook_attr)
+ return new_hook
+
+ def forward(self, *args, **kwargs):
+ if self.disable_adapters or (self.active_adapter not in self.modules_to_save):
+ return self.original_module(*args, **kwargs)
+ return self.modules_to_save[self.active_adapter](*args, **kwargs)
+
+ def enable_adapters(self, enabled: bool):
+ """Toggle the enabling and disabling of adapters
+
+ Takes care of setting the requires_grad flag for the adapter weights.
+
+ Args:
+ enabled (bool): True to enable adapters, False to disable adapters
+ """
+ if self._disable_adapters is not enabled:
+ # already in the desired state, do nothing
+ return
+
+ if enabled:
+ self.original_module.requires_grad_(False)
+ self.modules_to_save[self.active_adapter].requires_grad_(True)
+ self._disable_adapters = False
+ else:
+ self.original_module.requires_grad_(True)
+ self.modules_to_save.requires_grad_(False)
+ self._disable_adapters = True
+
+ def set_adapter(self, adapter_name: str):
+ """Set the active adapter
+
+ Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is
+ not desired, use the following code.
+
+ ```py
+ >>> for name, param in model_peft.named_parameters():
+ ... if ...: # some check on name (ex. if 'lora' in name)
+ ... param.requires_grad = False
+ ```
+
+ Args:
+ adapter_name (str): The name of the adapter to set as active
+ """
+ if adapter_name not in self.modules_to_save:
+ raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}")
+
+ self.modules_to_save[self.active_adapter].requires_grad_(False)
+ self.modules_to_save[adapter_name].requires_grad_(True)
+ self._active_adapter = adapter_name
+
+
+def _get_submodules(model, key):
+ parent = model.get_submodule(".".join(key.split(".")[:-1]))
+ target_name = key.split(".")[-1]
+ target = model.get_submodule(key)
+ return parent, target, target_name
+
+
+def _freeze_adapter(model, adapter_name):
+ for n, p in model.named_parameters():
+ if adapter_name in n:
+ p.requires_grad = False
+
+
+def _set_trainable(model, adapter_name):
+ key_list = [key for key, _ in model.named_modules()]
+ for key in key_list:
+ target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save)
+ if target_module_found:
+ parent, target, target_name = _get_submodules(model, key)
+ if isinstance(target, ModulesToSaveWrapper):
+ target.update(adapter_name)
+ target.set_adapter(target.active_adapter)
+ else:
+ new_module = ModulesToSaveWrapper(target, adapter_name)
+ new_module.set_adapter(adapter_name)
+ setattr(parent, target_name, new_module)
+
+
+def _set_adapter(model, adapter_name):
+ def check_adapter_name(adapter_name):
+ if isinstance(adapter_name, str):
+ return adapter_name
+
+ # adapter_name is a list of str
+ if len(adapter_name) > 1:
+ raise ValueError("Only one adapter can be set at a time for modules_to_save")
+ elif len(adapter_name) == 0:
+ raise ValueError("Please specify at least one adapter to set")
+ adapter_name = adapter_name[0]
+ return adapter_name
+
+ for module in model.modules():
+ if isinstance(module, ModulesToSaveWrapper):
+ # only check the adapter_name if we actually encounter a ModulesToSaveWrapper, otherwise we don't care
+ adapter_name = check_adapter_name(adapter_name)
+ module.set_adapter(adapter_name)
+
+
+def _prepare_prompt_learning_config(peft_config, model_config):
+ if peft_config.num_layers is None:
+ if "num_hidden_layers" in model_config:
+ num_layers = model_config["num_hidden_layers"]
+ elif "num_layers" in model_config:
+ num_layers = model_config["num_layers"]
+ elif "n_layer" in model_config:
+ num_layers = model_config["n_layer"]
+ else:
+ raise ValueError("Please specify `num_layers` in `peft_config`")
+ peft_config.num_layers = num_layers
+
+ if peft_config.token_dim is None:
+ if "hidden_size" in model_config:
+ token_dim = model_config["hidden_size"]
+ elif "n_embd" in model_config:
+ token_dim = model_config["n_embd"]
+ elif "d_model" in model_config:
+ token_dim = model_config["d_model"]
+ else:
+ raise ValueError("Please specify `token_dim` in `peft_config`")
+ peft_config.token_dim = token_dim
+
+ if peft_config.num_attention_heads is None:
+ if "num_attention_heads" in model_config:
+ num_attention_heads = model_config["num_attention_heads"]
+ elif "n_head" in model_config:
+ num_attention_heads = model_config["n_head"]
+ elif "num_heads" in model_config:
+ num_attention_heads = model_config["num_heads"]
+ elif "encoder_attention_heads" in model_config:
+ num_attention_heads = model_config["encoder_attention_heads"]
+ else:
+ raise ValueError("Please specify `num_attention_heads` in `peft_config`")
+ peft_config.num_attention_heads = num_attention_heads
+
+ if getattr(peft_config, "encoder_hidden_size", None) is None:
+ setattr(peft_config, "encoder_hidden_size", peft_config.token_dim)
+
+ return peft_config
+
+
+def fsdp_auto_wrap_policy(model):
+ import functools
+ import os
+
+ from accelerate import FullyShardedDataParallelPlugin
+ from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy
+
+ from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder
+
+ default_transformer_cls_names_to_wrap = (
+ ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
+ )
+ transformer_cls_names_to_wrap = os.environ.get(
+ "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap
+ ).split(",")
+ transformer_cls_to_wrap = {PrefixEncoder, PromptEncoder, PromptEmbedding}
+ for layer_class in transformer_cls_names_to_wrap:
+ transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class)
+ if transformer_cls is None:
+ raise Exception("Could not find the transformer layer class to wrap in the model.")
+ else:
+ transformer_cls_to_wrap.add(transformer_cls)
+
+ def lambda_policy_fn(module):
+ if (
+ len(list(module.named_children())) == 0
+ and getattr(module, "weight", None) is not None
+ and module.weight.requires_grad
+ ):
+ return True
+ return False
+
+ lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn)
+ transformer_wrap_policy = functools.partial(
+ transformer_auto_wrap_policy,
+ transformer_layer_cls=transformer_cls_to_wrap,
+ )
+
+ auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy])
+ return auto_wrap_policy
+
+
+def transpose(weight, fan_in_fan_out):
+ if not fan_in_fan_out:
+ return weight
+
+ if isinstance(weight, torch.nn.Parameter):
+ return torch.nn.Parameter(weight.T)
+ return weight.T
+
+
+def _is_valid_match(key: str, target_key: str):
+ """
+ Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key
+ or the target_key is a submodule of key
+ """
+ if key.endswith(target_key):
+ if len(key) > len(target_key):
+ return key.endswith("." + target_key) # must be a sub module
+ return True
+ return False
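+
+# Toy example: the target key must match a full submodule name, not just a suffix of one.
+#
+#     _is_valid_match("model.layers.0.self_attn.q_proj", "q_proj")     # True
+#     _is_valid_match("model.layers.0.self_attn.my_q_proj", "q_proj")  # False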
+
+
+def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int:
+ """Get the batch size based on either input_ids or input_embeds
+
+ Raises an ValueError if both are None.
+
+ """
+ if (input_ids is None) and (inputs_embeds is None):
+ raise ValueError("You have to provide either input_ids or inputs_embeds")
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+ return batch_size
+
+
+def get_quantization_config(model: torch.nn.Module, method: str):
+ """
+ Get the quantization config of the related quantization method
+ """
+ if (
+ hasattr(model, "config")
+ and hasattr(model.config, "quantization_config")
+ and (getattr(model, "quantization_method", None) == method)
+ ):
+ return model.config.quantization_config
+ return None
+
+
+def get_auto_gptq_quant_linear(gptq_quantization_config):
+ """
+ Get the right AutoGPTQQuantLinear class based on the quantization config file
+ """
+ if gptq_quantization_config is not None and is_auto_gptq_available():
+ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear
+
+ desc_act = gptq_quantization_config.desc_act
+ group_size = gptq_quantization_config.group_size
+ bits = gptq_quantization_config.bits
+ if hasattr(gptq_quantization_config, "use_exllama"):
+ use_exllama = gptq_quantization_config.use_exllama
+ else:
+ use_exllama = not gptq_quantization_config.disable_exllama
+ if hasattr(gptq_quantization_config, "exllama_config"):
+ exllama_version = gptq_quantization_config.exllama_config["version"]
+ else:
+ exllama_version = 1
+ AutoGPTQQuantLinear = dynamically_import_QuantLinear(
+ use_triton=False,
+ desc_act=desc_act,
+ group_size=group_size,
+ bits=bits,
+ disable_exllama=not (use_exllama and exllama_version == 1),
+ disable_exllamav2=not (use_exllama and exllama_version == 2),
+ )
+ return AutoGPTQQuantLinear
+ return None
+
+
+def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
+ """
+ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
+ example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
+ guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
+ non-overlapping lifetimes may have the same id.
+
+ This method is the exact same copy of
+ https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added
+ it here manually to avoid import issues with old versions of transformers.
+ """
+ if tensor.device.type == "xla" and is_torch_tpu_available():
+ # NOTE: XLA tensors don't have storage,
+ # so use some other unique id to distinguish them.
+ # This is an XLA tensor, so it must have been created on a torch_xla
+ # device, and the following import is safe:
+ import torch_xla
+
+ unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
+ else:
+ unique_id = storage_ptr(tensor)
+
+ return tensor.device, unique_id, storage_size(tensor)
+
+
+def cast_mixed_precision_params(model, dtype):
+ """
+ Cast all non-trainable parameters of the model to the given `dtype`. The `dtype` can be `torch.float16` or
+ `torch.bfloat16` as per the mixed-precision training you are performing. The trainable parameters are cast to full
+ precision. This is meant to reduce the GPU memory usage when using PEFT methods by using half-precision dtype for
+ non-trainable parameters. Having the trainable parameters in full-precision preserves training stability when using
+ automatic mixed-precision training.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to cast the non-trainable parameters of.
+ dtype (`torch.dtype`):
+ The dtype to cast the non-trainable parameters to. The `dtype` can be `torch.float16` or
+ `torch.bfloat16` as per the mixed-precision training you are performing.
+ """
+ for p in model.parameters():
+ if not p.requires_grad:
+ p.data = p.to(dtype)
+ else:
+ p.data = p.to(torch.float32)
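+
+# Hypothetical usage sketch: cast frozen parameters of a PEFT model to bf16 while the
+# trainable adapter parameters stay in fp32; `peft_model` is a placeholder.
+#
+#     cast_mixed_precision_params(peft_model, dtype=torch.bfloat16)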
+
+
+def str_to_bool(value: str) -> int:
+ """
+ Converts a string representation of truth to `True` (1) or `False` (0).
+
+ True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
+ """
+ # same function as in accelerate.utils, which replaces the deprecated distutils.util.strtobool
+ value = value.lower()
+ if value in ("y", "yes", "t", "true", "on", "1"):
+ return 1
+ elif value in ("n", "no", "f", "false", "off", "0"):
+ return 0
+ else:
+ raise ValueError(f"invalid truth value {value}")
+
+
+def check_file_exists_on_hf_hub(repo_id: str, filename: str, **kwargs) -> Optional[bool]:
+ """Check if a file exists on HF Hub, if check was not successful returns None instead of erroring.
+
+ Respect offline mode if set.
+
+ """
+ exists: Optional[bool] = None
+ if str_to_bool(os.environ.get("HF_HUB_OFFLINE", "0")):
+ # user set offline mode, cannot check
+ return exists
+
+ try:
+ exists = file_exists(repo_id, filename, **kwargs)
+ except (HFValidationError, EntryNotFoundError):
+ # error, exists stays None
+ pass
+ except Exception as e:
+ warnings.warn(
+ f"Unable to fetch remote file due to the following error {e} - silently ignoring the lookup"
+ f" for the file {filename} in {repo_id}."
+ )
+
+ return exists
diff --git a/venv/lib/python3.10/site-packages/peft/utils/peft_types.py b/venv/lib/python3.10/site-packages/peft/utils/peft_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4a84435dc60bae6fde33bdbbc5e24f03293c678
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/peft_types.py
@@ -0,0 +1,73 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all
+
+# coding=utf-8
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import enum
+
+
+class PeftType(str, enum.Enum):
+ """
+ Enum class for the different types of adapters in PEFT.
+
+ Supported PEFT types:
+ - PROMPT_TUNING
+ - MULTITASK_PROMPT_TUNING
+ - P_TUNING
+ - PREFIX_TUNING
+ - LORA
+ - ADALORA
+ - ADAPTION_PROMPT
+ - IA3
+ - LOHA
+ - LOKR
+ - OFT
+ - POLY
+ """
+
+ PROMPT_TUNING = "PROMPT_TUNING"
+ MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING"
+ P_TUNING = "P_TUNING"
+ PREFIX_TUNING = "PREFIX_TUNING"
+ LORA = "LORA"
+ ADALORA = "ADALORA"
+ ADAPTION_PROMPT = "ADAPTION_PROMPT"
+ IA3 = "IA3"
+ LOHA = "LOHA"
+ LOKR = "LOKR"
+ OFT = "OFT"
+ POLY = "POLY"
+
+
+class TaskType(str, enum.Enum):
+ """
+ Enum class for the different types of tasks supported by PEFT.
+
+ Overview of the supported task types:
+ - SEQ_CLS: Text classification.
+ - SEQ_2_SEQ_LM: Sequence-to-sequence language modeling.
+ - CAUSAL_LM: Causal language modeling.
+ - TOKEN_CLS: Token classification.
+ - QUESTION_ANS: Question answering.
+ - FEATURE_EXTRACTION: Feature extraction. Provides the hidden states which can be used as embeddings or features
+ for downstream tasks.
+ """
+
+ SEQ_CLS = "SEQ_CLS"
+ SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM"
+ CAUSAL_LM = "CAUSAL_LM"
+ TOKEN_CLS = "TOKEN_CLS"
+ QUESTION_ANS = "QUESTION_ANS"
+ FEATURE_EXTRACTION = "FEATURE_EXTRACTION"
diff --git a/venv/lib/python3.10/site-packages/peft/utils/save_and_load.py b/venv/lib/python3.10/site-packages/peft/utils/save_and_load.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ac1264d89afb82d3ab3eadccb49163a75284aaa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/peft/utils/save_and_load.py
@@ -0,0 +1,330 @@
+# Copyright 2023-present the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import warnings
+from typing import Optional
+
+import torch
+from huggingface_hub import file_exists, hf_hub_download
+from huggingface_hub.utils import EntryNotFoundError
+from safetensors.torch import load_file as safe_load_file
+
+from .other import (
+ EMBEDDING_LAYER_NAMES,
+ SAFETENSORS_WEIGHTS_NAME,
+ WEIGHTS_NAME,
+ check_file_exists_on_hf_hub,
+ infer_device,
+)
+from .peft_types import PeftType
+
+
+def has_valid_embedding_base_layer(layer):
+ """Check if the layer has an embedding base layer"""
+ return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding))
+
+
+def get_embedding_layer_name(model, layer, is_embedding_in_target_modules):
+ """Get the name of the embedding module for a given layer."""
+ for name, module in model.named_modules():
+ if (not is_embedding_in_target_modules and module == layer) or module == getattr(layer, "base_layer", None):
+ return name
+ return None
+
+
+def get_peft_model_state_dict(
+ model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto"
+):
+ """
+ Get the state dict of the Peft model.
+
+ Args:
+ model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,
+ the model should be the underlying model/unwrapped model (i.e. model.module).
+ state_dict (`dict`, *optional*, defaults to `None`):
+ The state dict of the model. If not provided, the state dict of the passed model will be used.
+ adapter_name (`str`, *optional*, defaults to `"default"`):
+ The name of the adapter whose state dict should be returned.
+ unwrap_compiled (`bool`, *optional*, defaults to `False`):
+ Whether to unwrap the model if torch.compile was used.
+ save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`):
+ If `True`, save the embedding layers in addition to adapter weights. If `"auto"`, checks whether any of the
+ common embedding layer names (`peft.utils.other.EMBEDDING_LAYER_NAMES`) appear in the config's
+ `target_modules` and sets the flag accordingly. This only works for 🤗 transformers models.
+ """
+ if unwrap_compiled:
+ model = getattr(model, "_orig_mod", model)
+
+ config = model.peft_config[adapter_name]
+ if state_dict is None:
+ state_dict = model.state_dict()
+ if config.peft_type in (PeftType.LORA, PeftType.ADALORA):
+ # to_return = lora_state_dict(model, bias=model.peft_config.bias)
+ # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`
+ # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP
+ bias = config.bias
+ if bias == "none":
+ to_return = {k: state_dict[k] for k in state_dict if "lora_" in k}
+ elif bias == "all":
+ to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k}
+ elif bias == "lora_only":
+ to_return = {}
+ for k in state_dict:
+ if "lora_" in k:
+ to_return[k] = state_dict[k]
+ bias_name = k.split("lora_")[0] + "bias"
+ if bias_name in state_dict:
+ to_return[bias_name] = state_dict[bias_name]
+ else:
+ raise NotImplementedError
+ to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))}
+ if config.peft_type == PeftType.ADALORA:
+ rank_pattern = config.rank_pattern
+ if rank_pattern is not None:
+ rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()}
+ config.rank_pattern = rank_pattern
+ to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)
+
+ elif config.peft_type == PeftType.LOHA:
+ to_return = {k: state_dict[k] for k in state_dict if "hada_" in k}
+
+ elif config.peft_type == PeftType.LOKR:
+ to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k}
+
+ elif config.peft_type == PeftType.ADAPTION_PROMPT:
+ to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")}
+ elif config.is_prompt_learning:
+ to_return = {}
+ if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
+ to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols
+ to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows
+ prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
+ else:
+ if config.inference_mode:
+ prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
+ else:
+ prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)
+ to_return["prompt_embeddings"] = prompt_embeddings
+ elif config.peft_type == PeftType.IA3:
+ to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k}
+ elif config.peft_type == PeftType.OFT:
+ to_return = {k: state_dict[k] for k in state_dict if "oft_" in k}
+ elif config.peft_type == PeftType.POLY:
+ to_return = {k: state_dict[k] for k in state_dict if "poly_" in k}
+ else:
+ raise NotImplementedError
+ if getattr(model, "modules_to_save", None) is not None:
+ for key, value in state_dict.items():
+ if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save):
+ to_return[key.replace("modules_to_save.", "")] = value
+
+ # check the common embedding layers in `target_modules` to reset `save_embedding_layers` if necessary
+ is_embedding_in_target_modules = False
+ if (
+ save_embedding_layers == "auto"
+ and hasattr(config, "target_modules")
+ and any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES)
+ ):
+ warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.")
+ save_embedding_layers = is_embedding_in_target_modules = True
+ elif save_embedding_layers == "auto":
+ vocab_size = getattr(getattr(model, "config", None), "vocab_size", None)
+ model_id = getattr(config, "base_model_name_or_path", None)
+
+ # For some models, e.g. diffusers, the text config file is stored in a subfolder,
+ # so we need to make sure we can download that config.
+ has_remote_config = False
+
+ # ensure that this check is not performed in HF offline mode, see #1452
+ if model_id is not None:
+ exists = check_file_exists_on_hf_hub(model_id, "config.json")
+ if exists is None:
+ # check failed, could not determine if it exists or not
+ warnings.warn(
+ f"Could not find a config file in {model_id} - will assume that the vocabulary was not modified."
+ )
+ has_remote_config = False
+ else:
+ has_remote_config = exists
+
+ # check if the vocab size of the base model is different from the vocab size of the finetuned model
+ if (
+ vocab_size
+ and model_id
+ and has_remote_config
+ and (vocab_size != model.config.__class__.from_pretrained(model_id).vocab_size)
+ ):
+ warnings.warn(
+ "Setting `save_embedding_layers` to `True` as the embedding layer has been resized during finetuning."
+ )
+ save_embedding_layers = True
+ else:
+ save_embedding_layers = False
+
+ if save_embedding_layers and hasattr(model, "get_input_embeddings"):
+ for layer in [model.get_input_embeddings(), model.get_output_embeddings()]:
+ if not is_embedding_in_target_modules or has_valid_embedding_base_layer(layer):
+ # support from version >= 0.6.2
+ embedding_module_name = get_embedding_layer_name(model, layer, is_embedding_in_target_modules)
+ if embedding_module_name:
+ to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k})
+ elif save_embedding_layers:
+ warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.")
+
+ to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()}
+ return to_return
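+
+# Hypothetical usage sketch: collect only the adapter weights of a LoRA model;
+# `peft_model` is a placeholder for a loaded `PeftModel`.
+#
+#     lora_state = get_peft_model_state_dict(peft_model, adapter_name="default")
+#     # keys are returned with the adapter name stripped, e.g.
+#     # "base_model.model...q_proj.lora_A.weight"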
+
+
+def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"):
+ """
+ Set the state dict of the Peft model.
+
+ Args:
+ model ([`PeftModel`]): The Peft model.
+ peft_model_state_dict (`dict`): The state dict of the Peft model.
+ """
+ config = model.peft_config[adapter_name]
+ state_dict = {}
+ if getattr(model, "modules_to_save", None) is not None:
+ for key, value in peft_model_state_dict.items():
+ if any(module_name in key for module_name in model.modules_to_save):
+ for module_name in model.modules_to_save:
+ if module_name in key:
+ key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}")
+ break
+ state_dict[key] = value
+ else:
+ state_dict = peft_model_state_dict
+
+ if config.peft_type in (
+ PeftType.LORA,
+ PeftType.LOHA,
+ PeftType.LOKR,
+ PeftType.ADALORA,
+ PeftType.IA3,
+ PeftType.OFT,
+ PeftType.POLY,
+ ):
+ peft_model_state_dict = {}
+ parameter_prefix = {
+ PeftType.IA3: "ia3_",
+ PeftType.LORA: "lora_",
+ PeftType.ADALORA: "lora_",
+ PeftType.LOHA: "hada_",
+ PeftType.LOKR: "lokr_",
+ PeftType.OFT: "oft_",
+ PeftType.POLY: "poly_",
+ }[config.peft_type]
+ for k, v in state_dict.items():
+ if parameter_prefix in k:
+ suffix = k.split(parameter_prefix)[1]
+ if "." in suffix:
+ suffix_to_replace = ".".join(suffix.split(".")[1:])
+ k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}")
+ else:
+ k = f"{k}.{adapter_name}"
+ peft_model_state_dict[k] = v
+ else:
+ peft_model_state_dict[k] = v
+ if config.peft_type == PeftType.ADALORA:
+ rank_pattern = config.rank_pattern
+ if rank_pattern is not None:
+ model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)
+ elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT:
+ peft_model_state_dict = state_dict
+ else:
+ raise NotImplementedError
+
+ load_result = model.load_state_dict(peft_model_state_dict, strict=False)
+ if config.is_prompt_learning:
+ model.prompt_encoder[adapter_name].embedding.load_state_dict(
+ {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True
+ )
+
+ if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
+ model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False)
+ return load_result
+
+
+def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict:
+ r"""
+ A helper method to load the PEFT weights from the HuggingFace Hub or locally
+
+ Args:
+ model_id (`str`):
+ The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub.
+ device (`str`):
+ The device to load the weights onto.
+ hf_hub_download_kwargs (`dict`):
+ Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub.
+ """
+ path = (
+ os.path.join(model_id, hf_hub_download_kwargs["subfolder"])
+ if hf_hub_download_kwargs.get("subfolder", None) is not None
+ else model_id
+ )
+
+ if device is None:
+ device = infer_device()
+
+ if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)):
+ filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME)
+ use_safetensors = True
+ elif os.path.exists(os.path.join(path, WEIGHTS_NAME)):
+ filename = os.path.join(path, WEIGHTS_NAME)
+ use_safetensors = False
+ else:
+ token = hf_hub_download_kwargs.get("token", None)
+ if token is None:
+ token = hf_hub_download_kwargs.get("use_auth_token", None)
+
+ hub_filename = (
+ os.path.join(hf_hub_download_kwargs["subfolder"], SAFETENSORS_WEIGHTS_NAME)
+ if hf_hub_download_kwargs.get("subfolder", None) is not None
+ else SAFETENSORS_WEIGHTS_NAME
+ )
+ has_remote_safetensors_file = file_exists(
+ repo_id=model_id,
+ filename=hub_filename,
+ revision=hf_hub_download_kwargs.get("revision", None),
+ repo_type=hf_hub_download_kwargs.get("repo_type", None),
+ token=token,
+ )
+ use_safetensors = has_remote_safetensors_file
+
+ if has_remote_safetensors_file:
+ # Priority 1: load safetensors weights
+ filename = hf_hub_download(
+ model_id,
+ SAFETENSORS_WEIGHTS_NAME,
+ **hf_hub_download_kwargs,
+ )
+ else:
+ try:
+ filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs)
+ except EntryNotFoundError:
+ raise ValueError(
+ f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. "
+ f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}."
+ )
+
+ if use_safetensors:
+ if hasattr(torch.backends, "mps") and (device == torch.device("mps")):
+ adapters_weights = safe_load_file(filename, device="cpu")
+ else:
+ adapters_weights = safe_load_file(filename, device=device)
+ else:
+ adapters_weights = torch.load(filename, map_location=torch.device(device))
+
+ return adapters_weights
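+
+# Hypothetical usage sketch (repository id is a placeholder): download the adapter
+# weights and load them onto the CPU.
+#
+#     adapters_weights = load_peft_weights("some-user/some-lora-adapter", device="cpu")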