diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9f38f1a13613d37900e1fa3508f121e91067ea2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/auto.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18ee52923b5939cd355646ef324fb990783eec56 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/auto.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fa7721da2af3093ba3084d66acfb728fd394acd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/helpers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a3fd077dfd30316ac237e5f94077c87d64977ec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/helpers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/import_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/import_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5a3aa7139226dfeadc521027c42336d740b3b5b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/import_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mapping.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb7c0ae443fe0e74d4d6f588ff9eec815a46fb29 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mapping.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mixed_model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mixed_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f85d8556f87c74551a6857d2deab810d75673982 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mixed_model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/peft_model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/peft_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c08118e47ab246247fb8fb1596d2697322f9998d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/__pycache__/peft_model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b47baa668177ec80b3ec142f1555c5b90f13dcca --- /dev/null 
+++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__init__.py @@ -0,0 +1,32 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all + +# coding=utf-8 +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel +from .lora import LoraConfig, LoraModel, LoftQConfig +from .loha import LoHaConfig, LoHaModel +from .lokr import LoKrConfig, LoKrModel +from .ia3 import IA3Config, IA3Model +from .adalora import AdaLoraConfig, AdaLoraModel +from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType +from .prefix_tuning import PrefixEncoder, PrefixTuningConfig +from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit +from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit +from .oft import OFTConfig, OFTModel +from .mixed import MixedModel +from .poly import PolyConfig, PolyModel diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36900b6416e30867d03dbcf9c7f4d89f7108baa0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..738b6e5e86e2cd333d61f62f80df55b16b119980 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6ed1c17df2a3d488eadf3e5a8c646a486de4872 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdb8a540bed454fd95633265e8cdceb3e792e3b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .config import AdaLoraConfig +from .gptq import SVDQuantLinear +from .layer import AdaLoraLayer, RankAllocator, SVDLinear +from .model import AdaLoraModel + + +__all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"] + + +def __getattr__(name): + if (name == "SVDLinear8bitLt") and is_bnb_available(): + from .bnb import SVDLinear8bitLt + + return SVDLinear8bitLt + + if (name == "SVDLinear4bit") and is_bnb_4bit_available(): + from .bnb import SVDLinear4bit + + return SVDLinear4bit + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py new file mode 100644 index 0000000000000000000000000000000000000000..b8c32a815cef22b938b840a1b6013592a338936b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py @@ -0,0 +1,145 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
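The module-level `__getattr__` hook in `peft/tuners/adalora/__init__.py` above defers the bitsandbytes-backed classes so that importing the package does not require `bitsandbytes` to be installed. Below is a minimal standalone sketch of that PEP 562 pattern; the module name and helper are illustrative assumptions, while `SVDLinear8bitLt` and the availability check mirror what the diff itself defines.

```python
# lazy_exports.py -- standalone sketch of the PEP 562 module-level __getattr__
# pattern used in peft/tuners/adalora/__init__.py above. The module name here
# is hypothetical; SVDLinear8bitLt and the bitsandbytes check come from the diff.
import importlib.util


def _bnb_installed() -> bool:
    # Rough stand-in for peft.import_utils.is_bnb_available(): only check
    # whether the optional dependency can be located at all.
    return importlib.util.find_spec("bitsandbytes") is not None


def __getattr__(name: str):
    # Called only when a normal attribute lookup on this module fails, so the
    # heavy import below runs on first access instead of at import time.
    if name == "SVDLinear8bitLt" and _bnb_installed():
        from peft.tuners.adalora.bnb import SVDLinear8bitLt

        return SVDLinear8bitLt
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

With this layout, `from lazy_exports import SVDLinear8bitLt` only touches `bitsandbytes` when the symbol is actually requested, which keeps the package importable in CPU-only environments.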
+ +from typing import Any + +import torch + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .layer import AdaLoraLayer + + +if is_bnb_available(): + + class SVDLinear8bitLt(torch.nn.Module, AdaLoraLayer): + # Low-rank matrix for SVD-based adaptation + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + result = self.base_layer(x) + + if self.disable_adapters: + return result + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + if x.dtype != torch.float32: + x = x.float() + + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T + if requires_conversion: + output = output.to(expected_dtype) + output = output * scaling / ranknum + # inplace operation on view is forbidden for MatMul8bitLtBackward, so avoid it + result = result + output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." + rep + + +if is_bnb_4bit_available(): + + class SVDLinear4bit(torch.nn.Module, AdaLoraLayer): + # Low-rank matrix for SVD-based adaptation + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + result = self.base_layer(x, *args, **kwargs) + + if self.disable_adapters: + return result + + # As per Tim Dettmers, for 4bit, we need to defensively clone here. + # The reason is that in some cases, an error can occur that backprop + # does not work on a manipulated view. This issue may be solved with + # newer PyTorch versions but this would need extensive testing to be + # sure. 
+ result = result.clone() + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + compute_dtype = lora_A.dtype + if x.dtype != compute_dtype: + x = x.to(compute_dtype) + + output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T + if requires_conversion: + output = output.to(expected_dtype) + output = output * scaling / ranknum + result += output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." + rep diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/config.py new file mode 100644 index 0000000000000000000000000000000000000000..93905ff28b3fb868e71d4a266d5200c46fec7248 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/config.py @@ -0,0 +1,52 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import Optional + +from peft.tuners.lora import LoraConfig +from peft.utils import PeftType + + +@dataclass +class AdaLoraConfig(LoraConfig): + """ + This is the configuration class to store the configuration of a [`~peft.AdaLora`]. + + Args: + target_r (`int`): The target average rank of incremental matrix. + init_r (`int`): The initial rank for each incremental matrix. + tinit (`int`): The steps of initial fine-tuning warmup. + tfinal (`int`): The step of final fine-tuning. + deltaT (`int`): The time internval between two budget allocations. + beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. + beta2 (`float`): The hyperparameter of EMA for undertainty quantification. + orth_reg_weight (`float`): The coefficient of orthogonal regularization. + total_step (`int`): The total training steps that should be specified before training. + rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator. 
+ """ + + target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) + init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."}) + tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) + tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) + deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) + beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) + beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) + orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) + total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) + rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) + + def __post_init__(self): + self.peft_type = PeftType.ADALORA diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..0fb77aaadf7ac3a89d8d52538117bc28249d07a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/layer.py @@ -0,0 +1,347 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import warnings +from typing import Any, List, Optional + +import torch +from torch import nn + +from peft.tuners.lora import LoraLayer +from peft.tuners.tuners_utils import check_adapters_to_merge +from peft.utils import transpose + + +class AdaLoraLayer(LoraLayer): + # List all names of layers that may contain adapter weights + # Note: ranknum doesn't need to be included as it is not an nn.Module + adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B") + # other_param_names is defined in LoraLayer + + def __init__(self, base_layer: nn.Module) -> None: + super().__init__(base_layer) + self.lora_E = nn.ParameterDict({}) + self.lora_A = nn.ParameterDict({}) + self.lora_B = nn.ParameterDict({}) + self.ranknum = nn.ParameterDict({}) + + def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): + if r < 0: + # note: r == 0 is allowed for AdaLora, see #1539 + raise ValueError(f"`r` should be a positive integer or 0, but the value passed is {r}") + + self.r[adapter_name] = r + self.lora_alpha[adapter_name] = lora_alpha + if lora_dropout > 0.0: + lora_dropout_layer = nn.Dropout(p=lora_dropout) + else: + lora_dropout_layer = nn.Identity() + + self.lora_dropout[adapter_name] = lora_dropout_layer + # Actual trainable parameters + # Right singular vectors + self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features)) + # Singular values + self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1)) + # Left singular vectors + self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r)) + # The current rank + self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False) + self.ranknum[adapter_name].data.fill_(float(r)) + self.ranknum[adapter_name].requires_grad = False + self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r) + if init_lora_weights: + self.reset_lora_parameters(adapter_name) + + if hasattr(self.get_base_layer(), "qweight"): + # QuantLinear + self.to(self.get_base_layer().qweight.device) + else: + self.to(self.get_base_layer().weight.device) + self.set_adapter(self.active_adapters) + + def reset_lora_parameters(self, adapter_name): + if adapter_name in self.lora_A.keys(): + nn.init.normal_(self.lora_E[adapter_name], mean=0.0, std=0.02) + nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02) + nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02) + + +class SVDLinear(nn.Module, AdaLoraLayer): + # SVD-based adaptation by a dense layer + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + fan_in_fan_out: bool = False, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + + self.fan_in_fan_out = fan_in_fan_out + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. 
+ adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + base_layer = self.get_base_layer() + if active_adapter in self.lora_A.keys(): + if safe_merge: + # Note that safe_merge will be slower than the normal merge + # because of the copy operation. + orig_weights = base_layer.weight.data.clone() + orig_weights += self.get_delta_weight(active_adapter) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = orig_weights + else: + base_layer.weight.data += self.get_delta_weight(active_adapter) + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.lora_A.keys(): + self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) + + def get_delta_weight(self, adapter) -> torch.Tensor: + return ( + transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out) + * self.scaling[adapter] + / (self.ranknum[adapter] + 1e-5) + ) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + x = x.to(lora_A.dtype) + result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." + rep + + +class RankAllocator: + """ + The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY + + Args: + config ([`AdaLoraConfig`]): The configuration of the AdaLora model. + model: the model that we apply AdaLoRA to. 
+ + """ + + def __init__(self, model, peft_config, adapter_name): + self.peft_config = peft_config + self.adapter_name = adapter_name + self.beta1 = peft_config.beta1 + self.beta2 = peft_config.beta2 + assert self.beta1 > 0 and self.beta1 < 1 + assert self.beta2 > 0 and self.beta2 < 1 + + self.reset_ipt() + self._set_budget_scheduler(model) + + def set_total_step(self, total_step): + self.peft_config.total_step = total_step + + def reset_ipt(self): + self.ipt = {} + self.exp_avg_ipt = {} + self.exp_avg_unc = {} + + def _set_budget_scheduler(self, model): + self.init_bgt = 0 + self.name_set = set() + for n, p in model.named_parameters(): + if f"lora_A.{self.adapter_name}" in n: + self.init_bgt += p.size(0) + self.name_set.add(n.replace("lora_A", "%s")) + self.name_set = sorted(self.name_set) + # The total final rank budget + self.target_bgt = self.peft_config.target_r * len(self.name_set) + + def budget_schedule(self, step: int): + tinit = self.peft_config.tinit + tfinal = self.peft_config.tfinal + total_step = self.peft_config.total_step + # Initial warmup + if step <= tinit: + budget = self.init_bgt + mask_ind = False + # Final fine-tuning + elif step > total_step - tfinal: + budget = self.target_bgt + mask_ind = True + else: + # Budget decreasing with a cubic scheduler + mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit) + budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt) + mask_ind = True if step % self.peft_config.deltaT == 0 else False + return budget, mask_ind + + def update_ipt(self, model): + # Update the sensitivity and uncertainty for every weight + for n, p in model.named_parameters(): + if "lora_" in n and self.adapter_name in n: + if n not in self.ipt: + self.ipt[n] = torch.zeros_like(p) + self.exp_avg_ipt[n] = torch.zeros_like(p) + self.exp_avg_unc[n] = torch.zeros_like(p) + with torch.no_grad(): + self.ipt[n] = (p * p.grad).abs().detach() + # Sensitivity smoothing + self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n] + # Uncertainty quantification + self.exp_avg_unc[n] = ( + self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs() + ) + + def _element_score(self, n): + return self.exp_avg_ipt[n] * self.exp_avg_unc[n] + + def _combine_ipt(self, ipt_E, ipt_AB): + ipt_AB = ipt_AB.sum(dim=1, keepdim=False) + sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1) + return sum_ipt + + def mask_to_budget(self, model, budget): + value_ipt = {} + vector_ipt = {} + triplet_ipt = {} + # Get the importance score for A, E, B + for n, p in model.named_parameters(): + if f"lora_A.{self.adapter_name}" in n: + entry_ipt = self._element_score(n) + comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True) + name_m = n.replace("lora_A", "%s") + if name_m not in vector_ipt: + vector_ipt[name_m] = [comb_ipt] + else: + vector_ipt[name_m].append(comb_ipt) + if f"lora_B.{self.adapter_name}" in n: + entry_ipt = self._element_score(n) + comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1) + name_m = n.replace("lora_B", "%s") + if name_m not in vector_ipt: + vector_ipt[name_m] = [comb_ipt] + else: + vector_ipt[name_m].append(comb_ipt) + if f"lora_E.{self.adapter_name}" in n: + entry_ipt = self._element_score(n) + name_m = n.replace("lora_E", "%s") + value_ipt[name_m] = entry_ipt + + all_score = [] + # Calculate the score for each triplet + for name_m in vector_ipt: + ipt_E = value_ipt[name_m] + ipt_AB = torch.cat(vector_ipt[name_m], dim=1) + sum_ipt = self._combine_ipt(ipt_E, ipt_AB) + 
name_E = name_m % "lora_E" + triplet_ipt[name_E] = sum_ipt.view(-1, 1) + all_score.append(sum_ipt.view(-1)) + + # Get the threshold by ranking ipt + mask_threshold = torch.kthvalue( + torch.cat(all_score), + k=self.init_bgt - budget, + )[0].item() + + rank_pattern = {} + # Mask the unimportant triplets + with torch.no_grad(): + for n, p in model.named_parameters(): + if f"lora_E.{self.adapter_name}" in n: + p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0) + rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist() + return rank_pattern + + def update_and_allocate(self, model, global_step, force_mask=False): + # # Update the importance score and allocate the budget + if global_step < self.peft_config.total_step - self.peft_config.tfinal: + self.update_ipt(model) + budget, mask_ind = self.budget_schedule(global_step) + # Allocate the budget according to importance scores + if mask_ind or force_mask: + rank_pattern = self.mask_to_budget(model, budget) + else: + rank_pattern = None + return budget, rank_pattern + + def mask_using_rank_pattern(self, model, rank_pattern): + # Mask the unimportant triplets + is_adapter_name_truncated = False + if self.adapter_name not in next(iter(rank_pattern.keys())): + is_adapter_name_truncated = True + + with torch.no_grad(): + for n, p in model.named_parameters(): + if f"lora_E.{self.adapter_name}" in n: + key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "") + mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device) + p.masked_fill_(~mask.bool(), 0.0) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/model.py new file mode 100644 index 0000000000000000000000000000000000000000..bf334b39ce6caf74c05e81b180fea31363a2a62e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/model.py @@ -0,0 +1,346 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +import torch +from transformers.pytorch_utils import Conv1D + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available +from peft.tuners.lora import LoraConfig, LoraModel +from peft.tuners.tuners_utils import BaseTunerLayer +from peft.utils import ( + TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, + _freeze_adapter, + _get_submodules, + get_auto_gptq_quant_linear, + get_quantization_config, +) + +from .gptq import SVDQuantLinear +from .layer import AdaLoraLayer, RankAllocator, SVDLinear + + +class AdaLoraModel(LoraModel): + """ + Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper: + https://openreview.net/forum?id=lq62uWRJjiY + + Args: + model ([`transformers.PreTrainedModel`]): The model to be adapted. + config ([`AdaLoraConfig`]): The configuration of the AdaLora model. + adapter_name (`str`): The name of the adapter, defaults to `"default"`. + + Returns: + `torch.nn.Module`: The AdaLora model. 
+ + Example:: + + >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig + >>> config = AdaLoraConfig( + peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"], + lora_dropout=0.01, + ) + >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(model, config, "default") + + **Attributes**: + - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted. + - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model. + """ + + # Note: don't redefine prefix here, it should be inherited from LoraModel + + def __init__(self, model, config, adapter_name): + super().__init__(model, config, adapter_name) + + traininable_mode_counter = 0 + for config in self.peft_config.values(): + if not config.inference_mode: + traininable_mode_counter += 1 + + if traininable_mode_counter > 1: + raise ValueError( + "AdaLoraModel supports only 1 trainable adapter. " + "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train." + ) + + if self.peft_config[adapter_name].inference_mode: + _freeze_adapter(self.model, adapter_name) + else: + self.trainable_adapter_name = adapter_name + self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name) + + def _check_new_adapter_config(self, config: LoraConfig) -> None: + """ + A helper method to check the config when a new adapter is being added. + + Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. + + """ + super()._check_new_adapter_config(config) + + traininable_mode_counter = 0 + for config_ in self.peft_config.values(): + if not config_.inference_mode: + traininable_mode_counter += 1 + + if traininable_mode_counter > 1: + raise ValueError( + f"{self.__class__.__name__} supports only 1 trainable adapter. " + "When using multiple adapters, set inference_mode to True for all adapters except the one " + "you want to train." + ) + + def _create_and_replace( + self, + lora_config, + adapter_name, + target, + target_name, + parent, + current_key, + ): + kwargs = { + "r": lora_config.init_r, + "lora_alpha": lora_config.lora_alpha, + "lora_dropout": lora_config.lora_dropout, + "fan_in_fan_out": lora_config.fan_in_fan_out, + "init_lora_weights": lora_config.init_lora_weights, + "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), + "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), + } + if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available(): + raise ImportError( + "To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. " + "You can install it with `pip install bitsandbytes`." 
+ ) + + quantization_config = get_quantization_config(self.model, method="gptq") + if quantization_config is not None: + kwargs["gptq_quantization_config"] = quantization_config + + # If it is not an AdaLoraLayer, create a new module, else update it with new adapters + if not isinstance(target, AdaLoraLayer): + new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs) + if adapter_name != self.active_adapter: + # adding an additional adapter: it is not automatically trainable + new_module.requires_grad_(False) + self._replace_module(parent, target_name, new_module, target) + else: + target.update_layer( + adapter_name, + lora_config.init_r, + lora_config.lora_alpha, + lora_config.lora_dropout, + lora_config.init_lora_weights, + ) + + @staticmethod + def _create_new_module(lora_config, adapter_name, target, **kwargs): + # avoid eager bnb import + if is_bnb_available(): + import bitsandbytes as bnb + + from .bnb import SVDLinear8bitLt + if is_bnb_4bit_available(): + from .bnb import SVDLinear4bit + + gptq_quantization_config = kwargs.get("gptq_quantization_config", None) + AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) + + loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) + loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): + kwargs.update( + { + "has_fp16_weights": target_base_layer.state.has_fp16_weights, + "memory_efficient_backward": target_base_layer.state.memory_efficient_backward, + "threshold": target_base_layer.state.threshold, + "index": target_base_layer.index, + } + ) + new_module = SVDLinear8bitLt(target, adapter_name, **kwargs) + elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): + fourbit_kwargs = kwargs.copy() + fourbit_kwargs.update( + { + "compute_dtype": target_base_layer.compute_dtype, + "compress_statistics": target_base_layer.weight.compress_statistics, + "quant_type": target_base_layer.weight.quant_type, + } + ) + new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs) + elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear): + new_module = SVDQuantLinear(target, adapter_name, **kwargs) + else: + if isinstance(target_base_layer, torch.nn.Linear): + if kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " + "Setting fan_in_fan_out to False." + ) + kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False + elif isinstance(target_base_layer, Conv1D): + if not kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to False but the target module is `Conv1D`. " + "Setting fan_in_fan_out to True." + ) + kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True + else: + raise ValueError( + f"Target module {target} is not supported. " + f"Currently, only `torch.nn.Linear` and `Conv1D` are supported." 
+ ) + new_module = SVDLinear(target, adapter_name, **kwargs) + + return new_module + + @staticmethod + def _prepare_adapter_config(peft_config, model_config): + if peft_config.target_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: + raise ValueError("Please specify `target_modules` in `peft_config`") + peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[ + model_config["model_type"] + ] + return peft_config + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + def forward(self, *args, **kwargs): + outputs = self.model.forward(*args, **kwargs) + + if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor): + # Calculate the orthogonal regularization + orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight + + if orth_reg_weight <= 0: + raise ValueError("orth_reg_weight should be greater than 0. ") + + regu_loss = 0 + num_param = 0 + for n, p in self.model.named_parameters(): + if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n: + para_cov = p @ p.T if "lora_A" in n else p.T @ p + I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) # noqa: E741 + I.requires_grad = False + num_param += 1 + regu_loss += torch.norm(para_cov - I, p="fro") + if num_param > 0: + regu_loss = regu_loss / num_param + else: + regu_loss = 0 + outputs.loss += orth_reg_weight * regu_loss + return outputs + + def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name): + lora_config = self.peft_config[adapter_name] + for name, rank_idx in rank_pattern.items(): + if isinstance(rank_idx, list): + rank = sum(rank_idx) + elif isinstance(rank_idx, torch.Tensor): + rank_idx = rank_idx.view(-1) + rank = rank_idx.sum().item() + else: + raise ValueError("Unexpected type of rank_idx") + key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) + _, target, _ = _get_submodules(self.model, key) + lora_E_weights = target.lora_E[adapter_name][rank_idx] + lora_A_weights = target.lora_A[adapter_name][rank_idx] + lora_B_weights = target.lora_B[adapter_name][:, rank_idx] + ranknum = target.ranknum[adapter_name] + target.update_layer( + adapter_name, + rank, + lora_config.lora_alpha, + lora_config.lora_dropout, + lora_config.init_lora_weights, + ) + with torch.no_grad(): + if rank > 0: + target.lora_E[adapter_name].copy_(lora_E_weights) + target.lora_A[adapter_name].copy_(lora_A_weights) + target.lora_B[adapter_name].copy_(lora_B_weights) + # The scaling is exactly as the previous + target.ranknum[adapter_name].copy_(ranknum) + + def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name): + for name, rank_idx in rank_pattern.items(): + rank = sum(rank_idx) + prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) + for layer in ["lora_E", "lora_A", "lora_B"]: + key = f"base_model.model.{prefix}.{layer}.{adapter_name}" + if layer != "lora_B": + state_dict[key] = ( + state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key] + ) + else: + state_dict[key] = ( + state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key] + ) + return state_dict + + def update_and_allocate(self, global_step): + """ + This method updates 
Adalora budget and mask. + + This should be called in every training step after `loss.backward()` and before `zero_grad()`. + + `tinit`, `tfinal` and `deltaT` are handled with in the method. + + Args: + global_step (`int`): The current training step, it is used to calculate adalora budget. + + Example: + + ```python + >>> loss = model(**input).loss + >>> loss.backward() + >>> optimizer.step() + >>> model.base_model.update_and_allocate(i_step) + >>> optimizer.zero_grad() + ``` + """ + lora_config = self.peft_config[self.trainable_adapter_name] + # Update the importance score and allocate the budget + if global_step < lora_config.total_step - lora_config.tfinal: + _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step) + if rank_pattern: + lora_config.rank_pattern = rank_pattern + # Finalize the budget allocation + elif global_step == lora_config.total_step - lora_config.tfinal: + _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True) + # for some reason, this freezes the trainable parameters and nothing gets updates + # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name) + lora_config.rank_pattern = rank_pattern + self.rankallocator.reset_ipt() + # Currently using inefficient way to mask the unimportant weights using the rank pattern + # due to problem mentioned above + elif global_step > lora_config.total_step - lora_config.tfinal: + self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern) + # Pass the function and do forward propagation + else: + return None diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f979ba9ccaee4e716ef9d9285c77a43ea2f94178 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beeededfe50934ea13cdbb3e2e83e5daa44082cc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3863820918ef7888135d3086ca939edc3c8bdf6b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81b521c67b25e275d9a000486af66fbf0645c946 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/model.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..d03d655d36b7aa1b65d175204d9f27a31964fb86 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/model.py new file mode 100644 index 0000000000000000000000000000000000000000..61969fe698d03a34559aa15ccc377fbbc97cace8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/model.py @@ -0,0 +1,394 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import re +import warnings +from dataclasses import asdict +from enum import Enum +from typing import Optional + +import torch +from torch import nn +from transformers.pytorch_utils import Conv1D + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available +from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists +from peft.utils import ( + TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, + TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, + ModulesToSaveWrapper, + _get_submodules, +) + +from .layer import Conv2d, IA3Layer, Linear + + +class IA3Model(BaseTuner): + """ + Creates a Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained + transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638 + + Args: + model ([`~transformers.PreTrainedModel`]): The model to be adapted. + config ([`IA3Config`]): The configuration of the (IA)^3 model. + adapter_name (`str`): The name of the adapter, defaults to `"default"`. + + Returns: + `torch.nn.Module`: The (IA)^3 model. + + Example: + + ```py + >>> from transformers import AutoModelForSeq2SeqLM, ia3Config + >>> from peft import IA3Model, IA3Config + + >>> config = IA3Config( + ... peft_type="IA3", + ... task_type="SEQ_2_SEQ_LM", + ... target_modules=["k", "v", "w0"], + ... feedforward_modules=["w0"], + ... ) + + >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") + >>> ia3_model = IA3Model(config, model) + ``` + + **Attributes**: + - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. + - **peft_config** ([`ia3Config`]): The configuration of the (IA)^3 model. 
+ """ + + prefix: str = "ia3_" + + def __init__(self, model, config, adapter_name): + super().__init__(model, config, adapter_name) + + @staticmethod + def _create_new_module(ia3_config, adapter_name, target, **kwargs): + # avoid eager bnb import + if is_bnb_available(): + import bitsandbytes as bnb + + from .bnb import Linear8bitLt + + if is_bnb_4bit_available(): + from .bnb import Linear4bit + + loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) + loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) + is_feedforward = kwargs.pop("is_feedforward", False) + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): + eightbit_kwargs = kwargs.copy() + eightbit_kwargs.update( + { + "has_fp16_weights": target_base_layer.state.has_fp16_weights, + "memory_efficient_backward": target_base_layer.state.memory_efficient_backward, + "threshold": target_base_layer.state.threshold, + "index": target_base_layer.index, + } + ) + new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs) + elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit): + fourbit_kwargs = kwargs.copy() + fourbit_kwargs.update( + { + "compute_dtype": target_base_layer.compute_dtype, + "compress_statistics": target_base_layer.weight.compress_statistics, + "quant_type": target_base_layer.weight.quant_type, + } + ) + new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs) + elif isinstance(target, torch.nn.Conv2d): + new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs) + elif isinstance(target_base_layer, torch.nn.Linear): + if kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " + "Setting fan_in_fan_out to False." + ) + kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = False + new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs) + elif isinstance(target_base_layer, Conv1D): + if not kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to False but the target module is `Conv1D`. " + "Setting fan_in_fan_out to True." + ) + kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True + new_module = Linear( + target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs + ) + else: + raise ValueError( + f"Target module {target} is not supported. " + f"Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported." 
+ ) + return new_module + + @staticmethod + def _check_target_module_exists(ia3_config, key): + return check_target_module_exists(ia3_config, key) + + def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: + for n, p in model.named_parameters(): + if self.prefix not in n: + p.requires_grad = False + + def _create_and_replace( + self, + ia3_config, + adapter_name, + target, + target_name, + parent, + current_key, + ): + # check if target module is in feedforward_modules + is_feedforward = self._check_target_module_feedforward(ia3_config, current_key) + + kwargs = { + "fan_in_fan_out": ia3_config.fan_in_fan_out, + "init_ia3_weights": ia3_config.init_ia3_weights, + "is_feedforward": is_feedforward, + "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), + "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), + } + + if isinstance(target, IA3Layer): + target.update_layer( + adapter_name, + ia3_config.init_ia3_weights, + ) + else: + new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs) + if adapter_name != self.active_adapter: + # adding an additional adapter: it is not automatically trainable + new_module.requires_grad_(False) + self._replace_module(parent, target_name, new_module, target) + + @staticmethod + def _check_target_module_feedforward(ia3_config, key) -> bool: + """ + A helper private method that checks if the target module `key` matches with a feedforward module specified in + `ia3_config` + """ + if isinstance(ia3_config.feedforward_modules, str): + is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key)) + else: + is_feedforward = any(key.endswith(target_key) for target_key in ia3_config.feedforward_modules) + return is_feedforward + + def _replace_module(self, parent, child_name, new_module, child): + setattr(parent, child_name, new_module) + + # child layer wraps the original module, unpack it + if hasattr(child, "base_layer"): + child = child.base_layer + + # layers with base_layer don't need the weight to be copied, as they have a reference already + if not hasattr(new_module, "base_layer"): + new_module.weight = child.weight + if hasattr(child, "bias"): + new_module.bias = child.bias + + if getattr(child, "state", None) is not None: + if hasattr(new_module, "base_layer"): + new_module.base_layer.state = child.state + else: + new_module.state = child.state + new_module.to(child.weight.device) + + # dispatch to correct device + for name, module in new_module.named_modules(): + if self.prefix in name: + module.to(child.weight.device) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + def get_peft_config_as_dict(self, inference: bool = False): + config_dict = {} + for key, value in self.peft_config.items(): + config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} + if inference: + config["inference_mode"] = True + config_dict[key] = config + return config + + def _set_adapter_layers(self, enabled=True): + for module in self.model.modules(): + if isinstance(module, (IA3Layer, ModulesToSaveWrapper)): + module.enable_adapters(enabled) + + def enable_adapter_layers(self) -> None: + """Enable all adapters. + + Call this if you have previously disabled all adapters and want to re-enable them. 
+ """ + self._set_adapter_layers(enabled=True) + + def disable_adapter_layers(self) -> None: + """Disable all adapters. + + When disabling all adapters, the model output corresponds to the output of the base model. + """ + self._set_adapter_layers(enabled=False) + + def set_adapter(self, adapter_name: str | list[str]) -> None: + """Set the active adapter(s). + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. + """ + for module in self.model.modules(): + if isinstance(module, IA3Layer): + if module.merged: + warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") + module.unmerge() + module.set_adapter(adapter_name) + + def _prepare_adapter_config(self, peft_config, model_config): + if peft_config.target_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING: + raise ValueError("Please specify `target_modules` in `peft_config`") + peft_config.target_modules = TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[model_config["model_type"]] + if peft_config.feedforward_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING: + raise ValueError("Please specify `feedforward_modules` in `peft_config`") + peft_config.feedforward_modules = TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[ + model_config["model_type"] + ] + return peft_config + + def _unload_and_optionally_merge( + self, merge: bool = True, safe_merge: bool = False, adapter_names: Optional[list[str]] = None + ): + r""" + This method merges the (IA)^3 layers into the base model. This is needed if someone wants to use the base model + as a standalone model. + + Args: + safe_merge (`bool`, `optional`, defaults to `False`): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. 
+ """ + if getattr(self.model, "is_loaded_in_8bit", False): + raise ValueError("Cannot merge ia3 layers when the model is loaded in 8-bit mode") + + if getattr(self.model, "is_loaded_in_4bit", False): + raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode") + + self._unloading_checks(adapter_names) + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + for key in key_list: + try: + parent, target, target_name = _get_submodules(self.model, key) + except AttributeError: + continue + + if hasattr(target, "base_layer"): + if merge: + target.merge(safe_merge=safe_merge, adapter_names=adapter_names) + self._replace_module(parent, target_name, target.get_base_layer(), target) + elif isinstance(target, ModulesToSaveWrapper): + # save any additional trainable modules part of `modules_to_save` + new_module = target.modules_to_save[target.active_adapter] + if hasattr(new_module, "base_layer"): + # check if the module is itself a tuner layer + if merge: + new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) + new_module = new_module.get_base_layer() + setattr(parent, target_name, new_module) + + return self.model + + def merge_and_unload(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> torch.nn.Module: + r""" + This method merges the IA³ layers into the base model. This is needed if someone wants to use the base model as + a standalone model. + + Args: + safe_merge (`bool`): + whether to activate the safe merging check to check if there is any potential Nan in the adapter + weights + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + + Example: + + ```py + >>> from transformers import AutoModelForCausalLM + >>> from peft import PeftModel + + >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") + >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" + >>> model = PeftModel.from_pretrained(base_model, peft_model_id) + >>> merged_model = model.merge_and_unload() + ``` + """ + return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names) + + def unload(self) -> torch.nn.Module: + """ + Gets back the base model by removing all the IA³ modules without merging. This gives back the original base + model. + """ + return self._unload_and_optionally_merge(merge=False) + + def delete_adapter(self, adapter_name: str) -> None: + """ + Deletes an existing adapter. + + Args: + adapter_name (str): Name of the adapter to be deleted. 
+ """ + if adapter_name not in self.peft_config: + raise ValueError(f"Adapter {adapter_name} does not exist") + del self.peft_config[adapter_name] + + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + new_adapter = None + for key in key_list: + _, target, _ = _get_submodules(self.model, key) + if isinstance(target, IA3Layer): + target.delete_adapter(adapter_name) + if new_adapter is None: + new_adapter = target.active_adapters[:] + + self.active_adapter = new_adapter or [] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f39deee17ab9cb0e24b3a98d8b54eb7eeb27c1f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import LoHaConfig +from .layer import Conv2d, Linear, LoHaLayer +from .model import LoHaModel + + +__all__ = ["LoHaConfig", "LoHaModel", "Conv2d", "Linear", "LoHaLayer"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9047396e2a5b38a90dab455217d8a535f3ba385d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71c622f5d12dc1bca3c80ea4425ce29a3f6bd2c3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5451a76508f8dff8462ec81be1ff50d331cbc188 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..287ae5b72f696c4e81932b961160ffa28f3fd925 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/config.py 
b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/config.py new file mode 100644 index 0000000000000000000000000000000000000000..c38ba7828b59668d87113bd53a8cd6bd7bd570e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/config.py @@ -0,0 +1,121 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import List, Optional, Union + +from peft.tuners.lycoris_utils import LycorisConfig +from peft.utils import PeftType + + +@dataclass +class LoHaConfig(LycorisConfig): + """ + This is the configuration class to store the configuration of a [`LoHaModel`]. + + Args: + r (`int`): + LoHa rank. + alpha (`int`): + The alpha parameter for LoHa scaling. + rank_dropout (`float`): + The dropout probability for rank dimension during training. + module_dropout (`float`): + The dropout probability for disabling LoHa modules during training. + use_effective_conv2d (`bool`): + Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper). + target_modules (`Optional[Union[List[str], str]]`): + The names of the modules to apply the adapter to. If this is specified, only the modules with the specified + names will be replaced. When passing a string, a regex match will be performed. When passing a list of + strings, either an exact match will be performed or it is checked if the name of the module ends with any + of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, + excluding the output layer. If this is not specified, modules will be chosen according to the model + architecture. If the architecture is not known, an error will be raised -- in this case, you should specify + the target modules manually. + init_weights (`bool`): + Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is + discouraged. + layers_to_transform (`Union[List[int], int]`): + The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices + that are specified in this list. If a single integer is passed, it will apply the transformations on the + layer at this index. + layers_pattern (`str`): + The layer pattern name, used only if `layers_to_transform` is different from `None`. + rank_pattern (`dict`): + The mapping from layer names or regexp expression to ranks which are different from the default rank + specified by `r`. + alpha_pattern (`dict`): + The mapping from layer names or regexp expression to alphas which are different from the default alpha + specified by `alpha`. + modules_to_save (`Optional[List[str]]`): + List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. 
+ """ + + r: int = field(default=8, metadata={"help": "LoHa rank"}) + alpha: int = field(default=8, metadata={"help": "LoHa alpha"}) + rank_dropout: float = field( + default=0.0, metadata={"help": "The dropout probability for rank dimension during training"} + ) + module_dropout: float = field( + default=0.0, metadata={"help": "The dropout probability for disabling LoHa modules during training"} + ) + use_effective_conv2d: bool = field( + default=False, + metadata={ + "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)' + }, + ) + target_modules: Optional[Union[List[str], str]] = field( + default=None, + metadata={ + "help": "List of module names or regex expression of the module names to replace with LoHa." + "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " + "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." + }, + ) + init_weights: bool = field( + default=True, + metadata={ + "help": ( + "Whether to initialize the weights of the LoHa layers with their default initialization. Don't change " + "this setting, except if you know exactly what you're doing." + ), + }, + ) + layers_to_transform: Optional[Union[List[int], int]] = field( + default=None, + metadata={ + "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." + }, + ) + layers_pattern: Optional[str] = field( + default=None, + metadata={ + "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." + }, + ) + modules_to_save: Optional[List[str]] = field( + default=None, + metadata={ + "help": "List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. " + "For example, in Sequence Classification or Token Classification tasks, " + "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." + }, + ) + + def __post_init__(self): + self.peft_type = PeftType.LOHA + self.target_modules = ( + set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules + ) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..b958decfad80ebb6eb5cb53f51365c2f7b5b1beb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/layer.py @@ -0,0 +1,375 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from typing import Any, Set, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from peft.tuners.lycoris_utils import LycorisLayer + + +class LoHaLayer(nn.Module, LycorisLayer): + # All names of layers that may contain adapter weights + adapter_layer_names = ("hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b", "hada_t1", "hada_t2") + # other_param_names is defined on parent class + + def __init__(self, base_layer: nn.Module): + super().__init__() + LycorisLayer.__init__(self, base_layer) + + # LoHa info + self.hada_w1_a = nn.ParameterDict({}) + self.hada_w1_b = nn.ParameterDict({}) + self.hada_w2_a = nn.ParameterDict({}) + self.hada_w2_b = nn.ParameterDict({}) + self.hada_t1 = nn.ParameterDict({}) + self.hada_t2 = nn.ParameterDict({}) + + @property + def _available_adapters(self) -> Set[str]: + return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2} + + def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]): + # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L130C9-L143C75 + if len(shape) == 4: + self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) + self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode + self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode + + self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) + self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode + self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode + else: + self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r)) + self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) + + self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r)) + self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) + + def reset_adapter_parameters(self, adapter_name: str): + # Original implementation performs initialization with normal distribution + # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158 + + # FedPara paper proposes to perform He initialization, let's stick with it + # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization + if adapter_name in self.hada_w1_a.keys(): + nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5)) + nn.init.zeros_(self.hada_w2_b[adapter_name]) + if adapter_name in self.hada_t1.keys(): + nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5)) + + def reset_adapter_parameters_random(self, adapter_name: str): + # Original implementation performs initialization with normal distribution + # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158 + + # FedPara paper proposes to perform He initialization, let's stick with it + # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization + if adapter_name in self.hada_w1_a.keys(): + nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5)) + 
nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5)) + if adapter_name in self.hada_t1.keys(): + nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5)) + + def update_layer( + self, + adapter_name: str, + r: int, + alpha: float, + rank_dropout: float, + module_dropout: float, + init_weights: bool, + use_effective_conv2d: bool = False, + **kwargs, + ) -> None: + """Internal function to create loha adapter + + Args: + adapter_name (`str`): Name for the adapter to add. + r (`int`): Rank for the added adapter. + alpha (`float`): Alpha for the added adapter. + rank_dropout (`float`): The dropout probability for rank dimension during training. + module_dropout (`float`): The dropout probability for disabling adapter during training. + init_weights (`bool`): Whether to initialize weights. + use_effective_conv2d (`bool`, *optional*, defaults to `False`): + Use parameter effective decomposition for Conv2d with ksize > 1. + """ + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") + + self.r[adapter_name] = r + self.alpha[adapter_name] = alpha + self.scaling[adapter_name] = alpha / r + self.rank_dropout[adapter_name] = rank_dropout + self.module_dropout[adapter_name] = module_dropout + + # Determine shape of LoHa weights + base_layer = self.get_base_layer() + if isinstance(base_layer, nn.Linear): + shape = tuple(base_layer.weight.shape) + elif isinstance(base_layer, nn.Conv2d): + use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1) + if use_effective_conv2d: + shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size) + else: + shape = ( + base_layer.out_channels, + base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], + ) + else: + raise TypeError(f"LoHa is not implemented for base layers of type {type(base_layer).__name__}") + + # Create weights with provided shape + self.create_adapter_parameters(adapter_name, r, shape) + + # Initialize weights + if init_weights: + self.reset_adapter_parameters(adapter_name) + else: + self.reset_adapter_parameters_random(adapter_name) + + # Move new weights to device + weight = getattr(self.get_base_layer(), "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + if weight.dtype.is_floating_point or weight.dtype.is_complex: + self.to(weight.device, dtype=weight.dtype) + else: + self.to(weight.device) + self.set_adapter(self.active_adapters) + + def get_delta_weight(self, adapter_name: str) -> torch.Tensor: + # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L178 + if adapter_name in self.hada_t1.keys(): + weight = make_weight_cp( + self.hada_t1[adapter_name], + self.hada_w1_a[adapter_name], + self.hada_w1_b[adapter_name], + self.hada_t2[adapter_name], + self.hada_w2_a[adapter_name], + self.hada_w2_b[adapter_name], + scale=torch.tensor(self.scaling[adapter_name]), + ) + else: + weight = make_weight( + self.hada_w1_a[adapter_name], + self.hada_w1_b[adapter_name], + self.hada_w2_a[adapter_name], + self.hada_w2_b[adapter_name], + scale=torch.tensor(self.scaling[adapter_name]), + ) + + base_layer = self.get_base_layer() + weight = weight.reshape(base_layer.weight.shape) + + # Perform 
rank dropout during training - drop rows of addition weights + rank_dropout = self.rank_dropout[adapter_name] + if self.training and rank_dropout: + drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype) + drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device) + # TODO: Investigate if there should be a scaler like in normal dropout during training + # Original implementation doesn't have it + # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L193 + drop /= drop.mean() + weight *= drop + + return weight + + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + previous_dtype = x.dtype + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + + # Execute all the adapters + for active_adapter in self.active_adapters: + if active_adapter not in self._available_adapters: + continue + + module_dropout = self.module_dropout[active_adapter] + + # Modify current execution weights + if (not self.training) or (self.training and torch.rand(1) > module_dropout): + result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs) + + result = result.to(previous_dtype) + return result + + +class Linear(LoHaLayer): + """LoHa implemented in Linear layer""" + + def __init__( + self, + base_layer: nn.Module, + adapter_name: str = "default", + r: int = 0, + alpha: float = 0.0, + rank_dropout: float = 0.0, + module_dropout: float = 0.0, + init_weights: bool = True, + **kwargs, + ): + super().__init__(base_layer) + + # Create adapter and set it active + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs) + + def _get_delta_activations( + self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any + ) -> torch.Tensor: + delta_weight = self.get_delta_weight(adapter_name) + # don't add bias here, because the bias is already included in the output of the base_layer + return F.linear(input, delta_weight) + + def __repr__(self) -> str: + rep = super().__repr__() + return "loha." + rep + + +class Conv2d(LoHaLayer): + """LoHa implemented in Conv2d layer""" + + def __init__( + self, + base_layer: nn.Module, + adapter_name: str = "default", + r: int = 0, + alpha: float = 0.0, + rank_dropout: float = 0.0, + module_dropout: float = 0.0, + use_effective_conv2d: bool = False, + init_weights: bool = True, + **kwargs, + ): + super().__init__(base_layer) + + # Create adapter and set it active + self._active_adapter = adapter_name + self.update_layer( + adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs + ) + + def _get_delta_activations( + self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any + ) -> torch.Tensor: + delta_weight = self.get_delta_weight(adapter_name) + # don't add bias here, because the bias is already included in the output of the base_layer + base_layer = self.get_base_layer() + return F.conv2d( + input, + delta_weight, + stride=base_layer.stride, + padding=base_layer.padding, + dilation=base_layer.dilation, + groups=base_layer.groups, + ) + + def __repr__(self) -> str: + rep = super().__repr__() + return "loha." 
+ rep + + +# Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L9 + + +class HadaWeight(torch.autograd.Function): + @staticmethod + def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)): + ctx.save_for_backward(w1a, w1b, w2a, w2b, scale) + diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale + return diff_weight + + @staticmethod + def backward(ctx, grad_out): + (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors + grad_out = grad_out * scale + temp = grad_out * (w2a @ w2b) + grad_w1a = temp @ w1b.T + grad_w1b = w1a.T @ temp + + temp = grad_out * (w1a @ w1b) + grad_w2a = temp @ w2b.T + grad_w2b = w2a.T @ temp + + del temp + return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None + + +class HadaWeightCP(torch.autograd.Function): + @staticmethod + def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)): + ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale) + + rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a) + rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a) + + return rebuild1 * rebuild2 * scale + + @staticmethod + def backward(ctx, grad_out): + (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors + grad_out = grad_out * scale + + temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b) + rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a) + + grad_w = rebuild * grad_out + del rebuild + + grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) + grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T) + del grad_w, temp + + grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp) + grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T) + del grad_temp + + temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b) + rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a) + + grad_w = rebuild * grad_out + del rebuild + + grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) + grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T) + del grad_w, temp + + grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp) + grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T) + del grad_temp + return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None + + +def make_weight(w1a, w1b, w2a, w2b, scale): + return HadaWeight.apply(w1a, w1b, w2a, w2b, scale) + + +def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale): + return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/model.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1aaac9d5918959edb04e826c6a81edf02a625e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/model.py @@ -0,0 +1,114 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
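The `HadaWeight`/`make_weight` code copied above composes the delta weight as a Hadamard (element-wise) product of two rank-`r` factorizations. The standalone sketch below reproduces that composition in plain PyTorch to show why it is parameter efficient; the shapes are arbitrary and chosen only for illustration.

```py
# Numerical sketch of the Hadamard-product composition used by make_weight:
# delta_w = (w1a @ w1b) * (w2a @ w2b) * scale, with scale = alpha / r.
import torch

out_dim, in_dim, r, alpha = 16, 32, 4, 8
scale = torch.tensor(alpha / r)

w1a = torch.randn(out_dim, r, requires_grad=True)
w1b = torch.randn(r, in_dim, requires_grad=True)
w2a = torch.randn(out_dim, r, requires_grad=True)
w2b = torch.randn(r, in_dim, requires_grad=True)

# Same expression as HadaWeight.forward
delta_w = (w1a @ w1b) * (w2a @ w2b) * scale
assert delta_w.shape == (out_dim, in_dim)

# Each factor has rank r, but their element-wise product can reach rank up to
# r**2, which is the parameter-efficiency argument of the FedPara/LoHa papers.
print(torch.linalg.matrix_rank(delta_w))  # typically r**2 = 16 here

# Gradients flow through all four factors, mirroring HadaWeight.backward.
delta_w.sum().backward()
print(w1a.grad.shape, w2b.grad.shape)
```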
+ +import re +from itertools import chain +from typing import Dict, Type, Union + +import torch +from torch import nn + +from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner + +from .layer import Conv2d, Linear, LoHaLayer + + +class LoHaModel(LycorisTuner): + """ + Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in + https://arxiv.org/abs/2108.06098 Current implementation heavily borrows from + https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py + + Args: + model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. + config ([`LoHaConfig`]): The configuration of the LoHa model. + adapter_name (`str`): The name of the adapter, defaults to `"default"`. + + Returns: + `torch.nn.Module`: The LoHa model. + + Example: + ```py + >>> from diffusers import StableDiffusionPipeline + >>> from peft import LoHaModel, LoHaConfig + + >>> config_te = LoHaConfig( + ... r=8, + ... lora_alpha=32, + ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], + ... rank_dropout=0.0, + ... module_dropout=0.0, + ... init_weights=True, + ... ) + >>> config_unet = LoHaConfig( + ... r=8, + ... lora_alpha=32, + ... target_modules=[ + ... "proj_in", + ... "proj_out", + ... "to_k", + ... "to_q", + ... "to_v", + ... "to_out.0", + ... "ff.net.0.proj", + ... "ff.net.2", + ... ], + ... rank_dropout=0.0, + ... module_dropout=0.0, + ... init_weights=True, + ... use_effective_conv2d=True, + ... ) + + >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default") + >>> model.unet = LoHaModel(model.unet, config_unet, "default") + ``` + + **Attributes**: + - **model** ([`~torch.nn.Module`]) -- The model to be adapted. + - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model. + """ + + prefix: str = "hada_" + layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = { + torch.nn.Conv2d: Conv2d, + torch.nn.Linear: Linear, + } + + def _create_and_replace( + self, + config: LycorisConfig, + adapter_name: str, + target: Union[LoHaLayer, nn.Module], + target_name: str, + parent: nn.Module, + current_key: str, + ) -> None: + """ + A private method to create and replace the target module with the adapter module. 
+ """ + + # Regexp matching - Find key which matches current target_name in patterns provided + pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys())) + target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name) + + kwargs = config.to_dict() + kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) + kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha) + + if isinstance(target, LoHaLayer): + target.update_layer(adapter_name, **kwargs) + else: + new_module = self._create_new_module(config, adapter_name, target, **kwargs) + self._replace_module(parent, target_name, new_module, target) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e4c9cdeaf87668c0d64b695983cf0d12fc0132c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py new file mode 100644 index 0000000000000000000000000000000000000000..8c8e90e62bb439467ea0300954e1f673e0e431ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py @@ -0,0 +1,100 @@ +# Copyright 2024-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, Optional + +import torch + +from peft.import_utils import is_aqlm_available +from peft.tuners.lora.layer import LoraLayer +from peft.tuners.tuners_utils import BaseTunerLayer + + +if is_aqlm_available(): + from aqlm import QuantizedLinear + + +class AqlmLoraLinear(torch.nn.Module, LoraLayer): + def __init__( + self, + base_layer, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + use_rslora: bool = False, + **kwargs, + ): + super().__init__() + LoraLayer.__init__(self, base_layer) + + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora) + + def forward(self, x: torch.Tensor): + # note: logic differs from default Linear because merging is not supported + result = self.base_layer(x) + + if self.disable_adapters: + return result + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + x = x.to(lora_A.weight.dtype) + + output = lora_B(lora_A(dropout(x))) + if requires_conversion: + output = output.to(expected_dtype) + output = output * scaling + result += output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." + rep + + # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102 + # def reset_lora_parameters(self, adapter_name): + # if adapter_name in self.lora_A.keys(): + # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight) + # torch.nn.init.zeros_(self.lora_B[adapter_name].weight) + + +def dispatch_aqlm( + target: torch.nn.Module, + adapter_name: str, + **kwargs: Any, +) -> Optional[torch.nn.Module]: + new_module = None + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear): + new_module = AqlmLoraLinear(target, adapter_name, **kwargs) + target.qweight = target_base_layer.codes + + return new_module diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/awq.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/awq.py new file mode 100644 index 0000000000000000000000000000000000000000..b3f5bf3978f18ea58b52e5df1bcaf25f1e44fd40 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/awq.py @@ -0,0 +1,108 @@ +# Copyright 2024-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
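`AqlmLoraLinear.forward` above follows the usual pattern for quantized backends where merging is not supported: the frozen quantized layer produces the base output, and each active adapter adds `scaling * lora_B(lora_A(dropout(x)))` on top, with a dtype conversion when autocast is disabled. The sketch below shows that composition with a plain `nn.Linear` standing in for the AQLM layer; the sizes and names are arbitrary.

```py
# Generic sketch of the residual LoRA forward used above; a plain nn.Linear
# stands in for the frozen aqlm QuantizedLinear base layer.
import torch
import torch.nn as nn

in_features, out_features, r, lora_alpha = 32, 64, 8, 16
scaling = lora_alpha / r

base = nn.Linear(in_features, out_features)
base.requires_grad_(False)  # the quantized base layer stays frozen

lora_A = nn.Linear(in_features, r, bias=False)
lora_B = nn.Linear(r, out_features, bias=False)
nn.init.zeros_(lora_B.weight)  # delta starts at zero, matching PEFT's default LoRA init
dropout = nn.Dropout(p=0.1)

x = torch.randn(4, in_features)

# Same composition as AqlmLoraLinear.forward: base output plus scaled low-rank update.
result = base(x) + scaling * lora_B(lora_A(dropout(x)))
print(result.shape)  # torch.Size([4, 64])

# Because lora_B starts at zero, the adapter initially leaves the base output unchanged.
assert torch.allclose(result, base(x))
```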
+import importlib.metadata as importlib_metadata +from typing import Any, Optional + +import packaging.version +import torch + +from peft.import_utils import is_auto_awq_available +from peft.tuners.lora.layer import LoraLayer +from peft.tuners.tuners_utils import BaseTunerLayer + + +if is_auto_awq_available(): + from awq.modules.linear import WQLinear_GEMM + + +class AwqLoraLinear(torch.nn.Module, LoraLayer): + def __init__( + self, + base_layer, + adapter_name, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + use_rslora: bool = False, + **kwargs, + ): + super().__init__() + LoraLayer.__init__(self, base_layer) + + # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter + # for backwards compatibility + self.quant_linear_module = base_layer + + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora) + + def forward(self, x: torch.Tensor): + result = self.quant_linear_module(x) + + if self.disable_adapters: + return result + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + x = x.to(lora_A.weight.dtype) + + output = lora_B(lora_A(dropout(x))) + if requires_conversion: + output = output.to(expected_dtype) + output = output * scaling + result = result + output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." + rep + + +def dispatch_awq( + target: torch.nn.Module, + adapter_name: str, + **kwargs: Any, +) -> Optional[torch.nn.Module]: + new_module = None + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if is_auto_awq_available() and isinstance(target_base_layer, WQLinear_GEMM): + # Raise the error only at the dispatch level + AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0") + version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq")) + + if AUTOAWQ_MINIMUM_VERSION > version_autoawq: + raise ImportError( + f"Found an incompatible version of auto-awq. Found version {version_autoawq}, " + f"but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT." + ) + + new_module = AwqLoraLinear(target, adapter_name, **kwargs) + target.qweight = target_base_layer.qweight + + return new_module diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/bnb.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/bnb.py new file mode 100644 index 0000000000000000000000000000000000000000..c9f8cf3f6dd88b99fd308b73a998b17f76eac513 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/bnb.py @@ -0,0 +1,508 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import warnings +from typing import Any, Optional + +import bitsandbytes as bnb +import torch + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available +from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge +from peft.utils.integrations import dequantize_bnb_weight +from peft.utils.other import transpose + +from .layer import LoraLayer + + +if is_bnb_available(): + + class Linear8bitLt(torch.nn.Module, LoraLayer): + # Lora implemented in a dense layer + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + use_rslora: bool = False, + use_dora: bool = False, + **kwargs, + ) -> None: + super().__init__() + LoraLayer.__init__(self, base_layer) + self.fan_in_fan_out = False + + self._active_adapter = adapter_name + self.update_layer( + adapter_name, + r, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + init_lora_weights=init_lora_weights, + use_rslora=use_rslora, + use_dora=use_dora, + ) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`list[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. + Defaults to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter not in self.lora_A.keys(): + continue + + warnings.warn( + "Merge lora module to 8-bit linear may get different generations due to rounding errors." + ) + lora_data = self.get_delta_weight(active_adapter) + + weight = self.get_base_layer().weight + state = self.get_base_layer().state + if state.SCB is None: + state.SCB = weight.SCB + + # Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8 + # dequantization directly + output = dequantize_bnb_weight(weight, state=state) + if not self.use_dora[active_adapter]: + w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data + else: + # handle dora + # since output already includes scaling, set it to 1 here + weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach() + # We need to cache weight_norm because it has to be based on the original weights. 
We + # cannot calculate it on the fly based on the merged weights when unmerging because its a + # different value + self._cache_store(f"{active_adapter}-weight_norm", weight_norm) + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + w_data = dora_factor.view(-1, 1) * (output + lora_data) + + if safe_merge and not torch.isfinite(w_data).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + self.get_base_layer().weight = bnb.nn.Int8Params( + w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights + ).to(weight.device) + state.reset_grads() + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter not in self.lora_A.keys(): + continue + warnings.warn( + "Unmerge lora module to 8-bit linear may get different generations due to rounding errors." + ) + lora_data = self.get_delta_weight(active_adapter) + + weight = self.get_base_layer().weight + state = self.get_base_layer().state + if state.SCB is None: + state.SCB = weight.SCB + output = dequantize_bnb_weight(weight, state=state) + + if not self.use_dora[active_adapter]: + w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data + else: + weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + w_data = output.data / dora_factor.view(-1, 1) - lora_data + + self.get_base_layer().weight = bnb.nn.Int8Params( + w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights + ).to(weight.device) + state.reset_grads() + + def get_delta_weight(self, adapter): + return ( + transpose( + self.lora_B[adapter].weight @ self.lora_A[adapter].weight, + False, + ) + * self.scaling[adapter] + ) + + def _mixed_batch_forward( + self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any + ) -> torch.Tensor: + # This is a special method that handles the case when users pass the argument `adapter_names`. This is an + # extra argument that allows mixing different adapters in the same batch at inference time. 
+ result = self.base_layer(x, *args, **kwargs) + + unique_adapters = set(adapter_names) + sub_batch_indices_list = [] + for adapter in unique_adapters: + sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) + + for i, active_adapter in enumerate(unique_adapters): + if active_adapter == "__base__": + continue + if active_adapter not in self.lora_A.keys(): + continue + + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + compute_dtype = lora_A.weight.dtype + if x.dtype != compute_dtype: + x = x.to(compute_dtype) + + # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear + # layer output + sub_batch = x[sub_batch_indices_list[i]] + output = lora_B(lora_A(dropout(sub_batch))) * scaling + if requires_conversion: + output = output.to(expected_dtype) + result[sub_batch_indices_list[i]] += output + + return result + + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + self._check_forward_args(x, *args, **kwargs) + adapter_names = kwargs.pop("adapter_names", None) + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif adapter_names is not None: + result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + compute_dtype = lora_A.weight.dtype + if x.dtype != compute_dtype: + x = x.to(compute_dtype) + + if not self.use_dora[active_adapter]: + output = lora_B(lora_A(dropout(x))) * scaling + else: + output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) + if requires_conversion: + output = output.to(expected_dtype) + + result = result + output + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." 
+ rep + + def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs): + new_module = None + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + loaded_in_8bit = kwargs.get("loaded_in_8bit", False) + if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): + eightbit_kwargs = kwargs.copy() + eightbit_kwargs.update( + { + "has_fp16_weights": target.state.has_fp16_weights, + "memory_efficient_backward": target.state.memory_efficient_backward, + "threshold": target.state.threshold, + "index": target.index, + } + ) + new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs) + + return new_module + + +if is_bnb_4bit_available(): + + class Linear4bit(torch.nn.Module, LoraLayer): + # Lora implemented in a dense layer + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + use_rslora: bool = False, + use_dora: bool = False, + **kwargs, + ) -> None: + super().__init__() + LoraLayer.__init__(self, base_layer) + self.fan_in_fan_out = False + + self._active_adapter = adapter_name + self.update_layer( + adapter_name, + r, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + init_lora_weights=init_lora_weights, + use_rslora=use_rslora, + use_dora=use_dora, + ) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`list[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. + Defaults to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter not in self.lora_A.keys(): + continue + + warnings.warn( + "Merge lora module to 4-bit linear may get different generations due to rounding errors." + ) + # Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930 + weight = self.get_base_layer().weight + kwargs = weight.__dict__ + lora_data = self.get_delta_weight(active_adapter) + + output = dequantize_bnb_weight(weight, state=weight.quant_state) + if not self.use_dora[active_adapter]: + w_data = output + lora_data + else: + # handle dora + # since output already includes scaling, set it to 1 here + weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach() + # We need to cache weight_norm because it has to be based on the original weights. We + # cannot calculate it on the fly based on the merged weights when unmerging because its a + # different value + self._cache_store(f"{active_adapter}-weight_norm", weight_norm) + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + w_data = dora_factor.view(-1, 1) * (output + lora_data) + + if safe_merge and not torch.isfinite(w_data).all(): + raise ValueError( + f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" + ) + if "bnb_quantized" in kwargs: + kwargs["bnb_quantized"] = False + self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( + weight.device + ) + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter not in self.lora_A.keys(): + continue + warnings.warn( + "Unmerge lora module to 4-bit linear may get different generations due to rounding errors." + ) + + lora_data = self.get_delta_weight(active_adapter) + weight = self.get_base_layer().weight + kwargs = weight.__dict__ + output = dequantize_bnb_weight(weight, state=weight.quant_state) + + if not self.use_dora[active_adapter]: + w_data = output - lora_data + else: + weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + w_data = output.data / dora_factor.view(-1, 1) - lora_data + + if "bnb_quantized" in kwargs: + kwargs["bnb_quantized"] = False + self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( + weight.device + ) + + def get_delta_weight(self, adapter): + return ( + transpose( + self.lora_B[adapter].weight @ self.lora_A[adapter].weight, + False, + ) + * self.scaling[adapter] + ) + + def _mixed_batch_forward( + self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any + ) -> torch.Tensor: + # This is a special method that handles the case when users pass the argument `adapter_names`. This is an + # extra argument that allows mixing different adapters in the same batch at inference time. 
+ result = self.base_layer(x, *args, **kwargs) + + unique_adapters = set(adapter_names) + sub_batch_indices_list = [] + for adapter in unique_adapters: + sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) + + for i, active_adapter in enumerate(unique_adapters): + if active_adapter == "__base__": + continue + if active_adapter not in self.lora_A.keys(): + continue + + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + x = x.to(lora_A.weight.dtype) + + # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear + # layer output + sub_batch = x[sub_batch_indices_list[i]] + output = lora_B(lora_A(dropout(sub_batch))) * scaling + if requires_conversion: + output = output.to(expected_dtype) + result[sub_batch_indices_list[i]] += output + + return result + + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + self._check_forward_args(x, *args, **kwargs) + adapter_names = kwargs.pop("adapter_names", None) + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif adapter_names is not None: + result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + # As per Tim Dettmers, for 4bit, we need to defensively clone here. + # The reason is that in some cases, an error can occur that backprop + # does not work on a manipulated view. This issue may be solved with + # newer PyTorch versions but this would need extensive testing to be + # sure. + result = result.clone() + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + x = x.to(lora_A.weight.dtype) + + if not self.use_dora[active_adapter]: + output = lora_B(lora_A(dropout(x))) * scaling + else: + output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) + if requires_conversion: + output = output.to(expected_dtype) + + result = result + output + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." 
+ rep + + def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs): + new_module = None + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + loaded_in_4bit = kwargs.get("loaded_in_4bit", False) + if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): + fourbit_kwargs = kwargs.copy() + fourbit_kwargs.update( + { + "compute_dtype": target_base_layer.compute_dtype, + "compress_statistics": target_base_layer.weight.compress_statistics, + "quant_type": target_base_layer.weight.quant_type, + } + ) + new_module = Linear4bit(target, adapter_name, **fourbit_kwargs) + + return new_module diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/config.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5c60a753c84cdf471ac3f9bd1327767440e185 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/config.py @@ -0,0 +1,299 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Literal, Optional, Union + +from peft.config import PeftConfig +from peft.utils import PeftType + + +@dataclass +class LoftQConfig: + """ + This is the sub-configuration class to store the configuration of a [`LoraModel`]. + + Args: + bits_pattern (`dict`): The mapping from layer names or regexp expression to bits which are different from the + default bits specified by `bits`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 2`}. + bits (`int`): Quantization bits for LoftQ. + iter (`int`): Alternating iterations for LoftQ. + fake (`bool`): True: use fp16/fp32; used for first time to save weights. False: use bitsandbytes 4bit linear + models. weights can't be saved. Recommend to set to True, save the weights and load the saved weights in 4 + bits. + """ + + loftq_bits: int = field(default=4, metadata={"help": "Quantization bits for LoftQ"}) + loftq_iter: int = field(default=1, metadata={"help": "Alternating iterations for LoftQ"}) + + +@dataclass +class LoraConfig(PeftConfig): + """ + This is the configuration class to store the configuration of a [`LoraModel`]. + + Args: + r (`int`): + Lora attention dimension (the "rank"). + target_modules (`Optional[Union[List[str], str]]`): + The names of the modules to apply the adapter to. If this is specified, only the modules with the specified + names will be replaced. When passing a string, a regex match will be performed. When passing a list of + strings, either an exact match will be performed or it is checked if the name of the module ends with any + of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, + excluding the output layer. 
If this is not specified, modules will be chosen according to the model + architecture. If the architecture is not known, an error will be raised -- in this case, you should specify + the target modules manually. + lora_alpha (`int`): + The alpha parameter for Lora scaling. + lora_dropout (`float`): + The dropout probability for Lora layers. + fan_in_fan_out (`bool`): + Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses + `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. + bias (`str`): + Bias type for LoRA. Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases + will be updated during training. Be aware that this means that, even when disabling the adapters, the model + will not produce the same output as the base model would have without adaptation. + use_rslora (`bool`): + When set to True, uses Rank-Stabilized LoRA which + sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better. + Otherwise, it will use the original default value of `lora_alpha/r`. + modules_to_save (`List[str]`): + List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. + init_lora_weights (`bool` | `Literal["gaussian", "loftq"]`): + How to initialize the weights of the adapter layers. Passing True (default) results in the default + initialization from the reference implementation from Microsoft. Passing 'gaussian' results in Gaussian + initialization scaled by the LoRA rank for linear and layers. Setting the initialization to False leads to + completely random initialization and is discouraged. Pass `'loftq'` to use LoftQ initialization. + layers_to_transform (`Union[List[int], int]`): + The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices + that are specified in this list. If a single integer is passed, it will apply the transformations on the + layer at this index. + layers_pattern (`str`): + The layer pattern name, used only if `layers_to_transform` is different from `None`. + rank_pattern (`dict`): + The mapping from layer names or regexp expression to ranks which are different from the default rank + specified by `r`. + alpha_pattern (`dict`): + The mapping from layer names or regexp expression to alphas which are different from the default alpha + specified by `lora_alpha`. + megatron_config (`Optional[dict]`): + The TransformerConfig arguments for Megatron. It is used to create LoRA's parallel linear layer. You can + get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron. + The arguments will be used to initialize the TransformerConfig of Megatron. You need to specify this + parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron. + megatron_core (`Optional[str]`): + The core module from Megatron to use, defaults to `"megatron.core"`. + loftq_config (`Optional[LoftQConfig]`): + The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone weights + and initialize Lora layers. Also pass `init_lora_weights='loftq'`. Note that you should not pass a + quantized model in this case, as LoftQ will quantize the model itself. + use_dora (`bool`): + Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the weights + into two parts, magnitude and direction. 
Direction is handled by normal LoRA, whereas the magnitude is + handled by a separate learnable parameter. This can improve the performance of LoRA especially at low + ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger overhead than pure + LoRA, so it is recommended to merge weights for inference. For more information, see + https://arxiv.org/abs/2402.09353. + layer_replication(`List[Tuple[int, int]]`): + Build a new stack of layers by stacking the original model layers according to the ranges specified. This + allows expanding (or shrinking) the model without duplicating the base model weights. The new layers will + all have separate LoRA adapters attached to them. + """ + + r: int = field(default=8, metadata={"help": "Lora attention dimension"}) + target_modules: Optional[Union[list[str], str]] = field( + default=None, + metadata={ + "help": ( + "List of module names or regex expression of the module names to replace with LoRA." + "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." + "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." + "If not specified, modules will be chosen according to the model architecture, If the architecture is " + "not known, an error will be raised -- in this case, you should specify the target modules manually." + ), + }, + ) + lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"}) + lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"}) + fan_in_fan_out: bool = field( + default=False, + metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, + ) + bias: Literal["none", "all", "lora_only"] = field( + default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"} + ) + use_rslora: bool = field( + default=False, + metadata={ + "help": ( + "When set to True, uses Rank-Stabilized LoRA doi.org/10.48550/arXiv.2312.03732" + " which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it" + " was proven to work better. Otherwise, it will use the original default" + " value of `lora_alpha/r`." + ) + }, + ) + modules_to_save: Optional[list[str]] = field( + default=None, + metadata={ + "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " + "For example, in Sequence Classification or Token Classification tasks, " + "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." + }, + ) + init_lora_weights: bool | Literal["gaussian", "loftq"] = field( + default=True, + metadata={ + "help": ( + "How to initialize the weights of the LoRA layers. Passing True (default) results in the default " + "initialization from the reference implementation from Microsoft. Passing 'gaussian' results " + "in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization " + "to False leads to completely random initialization and is discouraged." + "Pass `'loftq'` to use LoftQ initialization" + ), + }, + ) + layers_to_transform: Optional[Union[list[int], int]] = field( + default=None, + metadata={ + "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. " + "This only works when target_modules is a list of str." 
+ }, + ) + layers_pattern: Optional[Union[list[str], str]] = field( + default=None, + metadata={ + "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." + "This only works when target_modules is a list of str." + }, + ) + rank_pattern: Optional[dict] = field( + default_factory=dict, + metadata={ + "help": ( + "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " + "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" + ) + }, + ) + alpha_pattern: Optional[dict] = field( + default_factory=dict, + metadata={ + "help": ( + "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. " + "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" + ) + }, + ) + megatron_config: Optional[dict] = field( + default=None, + metadata={ + "help": ( + "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer." + "You can get it like this, `core_transformer_config_from_args(get_args())`, " + "these two functions being from Megatron." + "You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and " + "RowParallelLinear layers of megatron." + "It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` " + "functions, because TransformerConfig may not necessarily be serialized." + "But when using megatron, we can use `get_peft_model_state_dict` function and " + "megatron's framework, they can also save and load models and configurations." + ) + }, + ) + megatron_core: Optional[str] = field( + default="megatron.core", + metadata={ + "help": ( + "The core module from Megatron, it is used to create LoRA's parallel linear layer. " + "It only needs to be passed in when you need to use your own modified megatron core module. " + "Otherwise, it will use the default value `megatron.core`. " + ) + }, + ) + # dict type is used when loading config.json + loftq_config: Union[LoftQConfig, dict] = field( + default_factory=dict, + metadata={ + "help": ( + "The configuration of LoftQ. If this is passed, then LoftQ will be used to quantize the backbone " + "weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case." + ) + }, + ) + use_dora: bool = field( + default=False, + metadata={ + "help": ( + "Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the " + "weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the " + "magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, " + "especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger" + "overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, " + "see https://arxiv.org/abs/2402.09353." + ) + }, + ) + # Enables replicating layers in a model to expand it to a larger model. + layer_replication: Optional[list[tuple[int, int]]] = field( + default=None, + metadata={ + "help": ( + "This enables using LoRA to effectively expand a transformer model to a larger size by repeating some layers. " + "The transformation handles models (currently Llama, Bert or Falcon compatible architectures) with " + "a module list in the model which it modifies to expand the number of modules. 
" + "Base weights are shared so the memory usage is close to the original model. The intended use is these base weights " + "remain fixed during finetuning but each layer has a separate LoRA adapter so the layers can be specialed via " + "the adapter layers fit during fine tuning." + "The format is a list of [start, end) pairs which specify the layer ranges to stack. For example:\n" + " Original model has 5 layers labelled by their position in the model: `[0, 1, 2, 3, 4]`\n" + " layer_replication: `[[0, 4], [2, 5]]`\n" + " Final model will have this arrangement of original layers: `[0, 1, 2, 3, 2, 3, 4]`\n" + "This format is based on what is used for pass-through merges in mergekit. It makes it simple to select sequential " + "ranges of a model and stack them while reusing layers at either end of each sequence." + ) + }, + ) + + def __post_init__(self): + self.peft_type = PeftType.LORA + self.target_modules = ( + set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules + ) + # if target_modules is a regex expression, then layers_to_transform should be None + if isinstance(self.target_modules, str) and self.layers_to_transform is not None: + raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") + + # if target_modules is a regex expression, then layers_pattern should be None + if isinstance(self.target_modules, str) and self.layers_pattern is not None: + raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") + + if self.use_dora and self.megatron_config: + raise ValueError("DoRA does not support megatron_core, please set `use_dora=False`.") + + # handle init_lora_weights and loftq_config + if self.init_lora_weights == "loftq": + import importlib + + if not importlib.util.find_spec("scipy"): + raise ImportError("The required package 'scipy' is not installed. Please install it to continue.") + if self.loftq_config is None: + raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.") + + # convert loftq_config to dict + if self.loftq_config and not isinstance(self.loftq_config, dict): + self.loftq_config = vars(self.loftq_config) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..829b7bdf804608d8f415ad2e1ac76ce45a1c23d9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/layer.py @@ -0,0 +1,1066 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import math +import warnings +from typing import Any, Optional, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from transformers.pytorch_utils import Conv1D + +from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge +from peft.utils.integrations import dequantize_bnb_weight, gather_params_ctx +from peft.utils.other import transpose + +from .config import LoraConfig + + +class LoraLayer(BaseTunerLayer): + # All names of layers that may contain (trainable) adapter weights + adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B") + # All names of other parameters that may contain adapter-related parameters + other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout") + + def __init__(self, base_layer: nn.Module, **kwargs) -> None: + self.base_layer = base_layer + self.r = {} + self.lora_alpha = {} + self.scaling = {} + self.lora_dropout = nn.ModuleDict({}) + self.lora_A = nn.ModuleDict({}) + self.lora_B = nn.ModuleDict({}) + # For Embedding layer + self.lora_embedding_A = nn.ParameterDict({}) + self.lora_embedding_B = nn.ParameterDict({}) + # Mark the weight as unmerged + self._disable_adapters = False + self.merged_adapters = [] + self.use_dora: dict[str, bool] = {} + self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA + self._caches: dict[str, Any] = {} + self.kwargs = kwargs + + base_layer = self.get_base_layer() + if isinstance(base_layer, nn.Linear): + in_features, out_features = base_layer.in_features, base_layer.out_features + elif isinstance(base_layer, nn.Conv2d): + in_features, out_features = base_layer.in_channels, base_layer.out_channels + elif isinstance(base_layer, nn.Embedding): + in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim + elif isinstance(base_layer, Conv1D): + in_features, out_features = ( + base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape + ) + elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"): + # QuantLinear + in_features, out_features = base_layer.infeatures, base_layer.outfeatures + elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"): + # Megatron ColumnParallelLinear,RowParallelLinear + in_features, out_features = base_layer.input_size, base_layer.output_size + elif hasattr(base_layer, "codebooks") and base_layer.__class__.__name__ == "QuantizedLinear": + # AQLM QuantLinear + in_features, out_features = base_layer.in_features, base_layer.out_features + elif hasattr(base_layer, "w_bit") and base_layer.__class__.__name__ == "WQLinear_GEMM": + # Awq layers + in_features, out_features = base_layer.in_features, base_layer.out_features + else: + raise ValueError(f"Unsupported layer type {type(base_layer)}") + + self.in_features = in_features + self.out_features = out_features + + def update_layer( + self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool = False + ): + # This code works for linear layers, override for other layer types + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") + + self.r[adapter_name] = r + self.lora_alpha[adapter_name] = lora_alpha + if lora_dropout > 0.0: + lora_dropout_layer = nn.Dropout(p=lora_dropout) + else: + lora_dropout_layer = nn.Identity() + + self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer})) + # Actual trainable parameters + 
self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False) + self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False) + if use_rslora: + self.scaling[adapter_name] = lora_alpha / math.sqrt(r) + else: + self.scaling[adapter_name] = lora_alpha / r + + if init_lora_weights == "loftq": + self.loftq_init(adapter_name) + elif init_lora_weights: + self.reset_lora_parameters(adapter_name, init_lora_weights) + + # check weight and qweight (for GPTQ) + for weight_name in ("weight", "qweight"): + weight = getattr(self.get_base_layer(), weight_name, None) + if weight is not None: + # the layer is already completely initialized, this is an update + if weight.dtype.is_floating_point or weight.dtype.is_complex: + self.to(weight.device, dtype=weight.dtype) + else: + self.to(weight.device) + break + + if use_dora: + self.dora_init(adapter_name) + self.use_dora[adapter_name] = True + else: + self.use_dora[adapter_name] = False + + self.set_adapter(self.active_adapters) + + def reset_lora_parameters(self, adapter_name, init_lora_weights): + if init_lora_weights is False: + return + + if adapter_name in self.lora_A.keys(): + if init_lora_weights is True: + # initialize A the same way as the default for nn.Linear and B to zero + # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124 + nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5)) + elif init_lora_weights.lower() == "gaussian": + nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name]) + else: + raise ValueError(f"Unknown initialization {init_lora_weights=}") + nn.init.zeros_(self.lora_B[adapter_name].weight) + if adapter_name in self.lora_embedding_A.keys(): + # initialize a the same way as the default for nn.linear and b to zero + nn.init.zeros_(self.lora_embedding_A[adapter_name]) + nn.init.normal_(self.lora_embedding_B[adapter_name]) + + def loftq_init(self, adapter_name): + from peft.utils.loftq_utils import loftq_init + + weight = self.get_base_layer().weight + kwargs = { + "num_bits": self.kwargs.get("loftq_bits", 4), + "reduced_rank": self.r[adapter_name], + "num_iter": self.kwargs.get("loftq_iter", 1), + } + + qweight, lora_A, lora_B = loftq_init(weight, **kwargs) + if adapter_name in self.lora_A.keys(): + # initialize A the same way as the default for nn.Linear and B to zero + self.lora_A[adapter_name].weight.data = lora_A + self.lora_B[adapter_name].weight.data = lora_B + if adapter_name in self.lora_embedding_A.keys(): + # initialize a the same way as the default for nn.linear and b to zero + self.lora_embedding_A[adapter_name].weight.data = lora_A + self.lora_embedding_B[adapter_name].weight.data = lora_B + self.get_base_layer().weight.data = qweight + + def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor: + # calculate L2 norm of weight matrix, column-wise + weight = weight + scaling * lora_weight + weight_norm = torch.linalg.norm(weight, dim=1).to(weight.dtype) + return weight_norm + + def dora_init(self, adapter_name: str) -> None: + lora_A = self.lora_A[adapter_name] + lora_B = self.lora_B[adapter_name] + scaling = self.scaling[adapter_name] + with gather_params_ctx(self.get_base_layer()): + weight = self.get_base_layer().weight + quant_state = getattr(self.get_base_layer(), "state", None) + weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb + if weight.data.ndim == 4: # For handling LoRAs applied to Conv2Ds. 
+ lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1)) + lora_weight = lora_weight.reshape(weight.shape) + else: + lora_weight = lora_B.weight @ lora_A.weight + weight_norm = self._get_weight_norm(weight, lora_weight, scaling) + self.lora_magnitude_vector = nn.ParameterDict() + self.lora_magnitude_vector[adapter_name] = nn.Parameter(weight_norm, requires_grad=True) + # add lora_magnitude_vector to the list of learnable parameters + self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",) + + def _cache_store(self, key: str, value: Any) -> None: + self._caches[key] = value + + def _cache_pop(self, key: str) -> Any: + value = self._caches.pop(key) + return value + + def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter): + """ + For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer + output. + """ + lora_weight = lora_B.weight @ lora_A.weight + magnitude = self.lora_magnitude_vector[active_adapter] + weight = self.get_base_layer().weight + quant_state = getattr(self.get_base_layer(), "state", None) + weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb + weight = weight.to(x.dtype) + weight_norm = self._get_weight_norm(weight, lora_weight, scaling) + # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353) + # "[...] we suggest treating ||V +∆V ||_c in + # Eq. (5) as a constant, thereby detaching it from the gradient + # graph. This means that while ||V + ∆V ||_c dynamically + # reflects the updates of ∆V , it won’t receive any gradient + # during backpropagation" + weight_norm = weight_norm.detach() + mag_norm_scale = (magnitude / weight_norm).view(1, -1) + result_dora = (mag_norm_scale - 1) * ( + F.linear(x, transpose(weight, self.fan_in_fan_out)) + ) + mag_norm_scale * lora_B(lora_A(x)) * scaling + + # Note: Computation could potentially be accelerated by using the code below instead of calculating X@W again. 
+ # This is only correct if dropout=0, otherwise results will differ: + # https://github.com/huggingface/peft/pull/1474#issuecomment-1964682771 + # bias = self.get_base_layer().bias + # if bias is not None: + # result = result - bias + # result = mag_norm_scale * result + mag_norm_scale * lora_B(lora_A(x)) * scaling + # if bias is not None: + # result = result + bias + + return result_dora + + def set_scale(self, adapter, scale): + if adapter not in self.scaling: + # Ignore the case where the adapter is not in the layer + return + self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter] + + def scale_layer(self, scale: float) -> None: + if scale == 1: + return + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + + self.scaling[active_adapter] *= scale + + def unscale_layer(self, scale=None) -> None: + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + + if scale is None: + self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter] + else: + self.scaling[active_adapter] /= scale + + def _check_forward_args(self, x, *args, **kwargs): + """Check if the arguments are compatible with the configs and state of the model""" + adapter_names = kwargs.get("adapter_names", None) + if adapter_names is None: + return + + if len(x) != len(adapter_names): + msg = ( + "Length of `adapter_names` should be the same as the number of inputs, but got " + f"{len(adapter_names)} and {len(x)} respectively." + ) + raise ValueError(msg) + + if self.merged: + # It is unclear what would be the right thing to do if users pass adapter_names and there are merged + # adapters. Therefore, it is better to raise an error in this case. + msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first." + raise ValueError(msg) + + unique_adapters = set(self.active_adapters) + for adapter_name in unique_adapters: + if self.use_dora.get(adapter_name, False): + msg = "Cannot pass `adapter_names` when DoRA is enabled." + raise ValueError(msg) + + def _mixed_batch_forward( + self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any + ) -> torch.Tensor: + # This is a special method that handles the case when users pass the argument `adapter_names`. This is an + # extra argument that allows mixing different adapters in the same batch at inference time. 
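As the comment above explains, `adapter_names` lets each row of a batch go through a different adapter. A usage sketch, assuming a `peft_model` that already holds two LoRA adapters named "en" and "de" (for example loaded via `load_adapter`); per the checks in `_check_forward_args`, the adapters must be unmerged, DoRA must be disabled, one name is required per input row, and at the model level this only works outside training mode:

```py
import torch

peft_model.eval()
input_ids = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])  # placeholder batch of 3 rows

with torch.no_grad():
    # one adapter name per row; "__base__" routes that row through the base layer only
    out = peft_model(input_ids=input_ids, adapter_names=["en", "de", "__base__"])
```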
+ result = self.base_layer(x, *args, **kwargs) + torch_result_dtype = result.dtype + + unique_adapters = set(adapter_names) + sub_batch_indices_list = [] + for adapter in unique_adapters: + sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) + + for i, active_adapter in enumerate(unique_adapters): + if active_adapter == "__base__": + continue + if active_adapter not in self.lora_A.keys(): + continue + + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear + # layer output + sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype) + lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling + result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype) + + return result + + +# Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py +# and modified to work with PyTorch FSDP + + +# ------------------------------------------------------------------------------------------ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. +# ------------------------------------------------------------------------------------------ + + +class Linear(nn.Module, LoraLayer): + # Lora implemented in a dense layer + def __init__( + self, + base_layer, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) + is_target_conv_1d_layer: bool = False, + init_lora_weights: Union[bool, str] = True, + use_rslora: bool = False, + use_dora: bool = False, + **kwargs, + ) -> None: + super().__init__() + LoraLayer.__init__(self, base_layer, **kwargs) + self.fan_in_fan_out = fan_in_fan_out + + self._active_adapter = adapter_name + self.update_layer( + adapter_name, + r, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + init_lora_weights=init_lora_weights, + use_rslora=use_rslora, + use_dora=use_dora, + ) + self.is_target_conv_1d_layer = is_target_conv_1d_layer + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`list[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self.lora_A.keys(): + base_layer = self.get_base_layer() + if safe_merge: + # Note that safe_merge will be slower than the normal merge + # because of the copy operation. 
+ orig_weights = base_layer.weight.data.clone() + delta_weight = self.get_delta_weight(active_adapter) + if not self.use_dora[active_adapter]: + orig_weights = orig_weights + delta_weight + else: + # handle dora + # since delta_weight already includes scaling, set it to 1 here + weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach() + # We need to cache weight_norm because it has to be based on the original weights. We + # cannot calculate it on the fly based on the merged weights when unmerging because its a + # different value + self._cache_store(f"{active_adapter}-weight_norm", weight_norm) + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + orig_weights = dora_factor.view(-1, 1) * (orig_weights + delta_weight) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = orig_weights + else: + delta_weight = self.get_delta_weight(active_adapter) + if not self.use_dora[active_adapter]: + base_layer.weight.data = base_layer.weight.data + delta_weight + else: + # handle dora + # since delta_weight already includes scaling, set it to 1 here + weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach() + # We need to cache weight_norm because it has to be based on the original weights. We + # cannot calculate it on the fly based on the merged weights when unmerging because its a + # different value + self._cache_store(f"{active_adapter}-weight_norm", weight_norm) + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + new_weight = dora_factor.view(-1, 1) * (base_layer.weight.data + delta_weight) + base_layer.weight.data = new_weight + + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.lora_A.keys(): + weight = self.get_base_layer().weight + delta_weight = self.get_delta_weight(active_adapter) + if not self.use_dora[active_adapter]: + weight.data -= delta_weight + else: + weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight + weight.data = weight_orig + + def get_delta_weight(self, adapter) -> torch.Tensor: + """ + Compute the delta weight for the given adapter. + + Args: + adapter (str): + The name of the adapter for which the delta weight should be computed. + """ + device = self.lora_B[adapter].weight.device + dtype = self.lora_B[adapter].weight.dtype + + # In case users wants to merge the adapter weights that are in + # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to + # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. 
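Connecting the `merge`/`unmerge` machinery above to the user-facing calls, a short sketch assuming the `peft_model` from the earlier example; `merge_and_unload` routes through these methods and forwards the `safe_merge` NaN check documented above:

```py
# temporarily fold the active adapter into the base weights, then restore them
peft_model.merge_adapter()
# ... run inference on the merged weights ...
peft_model.unmerge_adapter()

# or merge permanently and drop the LoRA wrappers entirely
merged_model = peft_model.merge_and_unload(safe_merge=True)
merged_model.save_pretrained("opt-350m-lora-merged")  # placeholder output path
```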
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 + + weight_A = self.lora_A[adapter].weight + weight_B = self.lora_B[adapter].weight + + if cast_to_fp32: + weight_A = weight_A.float() + weight_B = weight_B.float() + + output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter] + + if cast_to_fp32: + output_tensor = output_tensor.to(dtype=dtype) + + # cast back the weights + self.lora_A[adapter].weight.data = weight_A.to(dtype) + self.lora_B[adapter].weight.data = weight_B.to(dtype) + + return output_tensor + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + self._check_forward_args(x, *args, **kwargs) + adapter_names = kwargs.pop("adapter_names", None) + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif adapter_names is not None: + result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + torch_result_dtype = result.dtype + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + x = x.to(lora_A.weight.dtype) + + if not self.use_dora[active_adapter]: + result = result + lora_B(lora_A(dropout(x))) * scaling + else: + x = dropout(x) + result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) + + result = result.to(torch_result_dtype) + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." + rep + + +class Embedding(nn.Module, LoraLayer): + # LoRA implemented in a Embedding layer + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: Union[bool, str] = True, + use_rslora: bool = False, + use_dora: bool = False, + **kwargs, + ) -> None: + super().__init__() + LoraLayer.__init__(self, base_layer) + + if use_dora: + raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") + + self._active_adapter = adapter_name + self.update_layer( + adapter_name, + r, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + init_lora_weights=init_lora_weights, + use_rslora=use_rslora, + use_dora=use_dora, + ) + + def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora): + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") + + self.r[adapter_name] = r + self.lora_alpha[adapter_name] = lora_alpha + if lora_dropout > 0.0: + lora_dropout_layer = nn.Dropout(p=lora_dropout) + else: + lora_dropout_layer = nn.Identity() + + self.lora_dropout[adapter_name] = lora_dropout_layer + # Actual trainable parameters + weight_A = torch.randn((r, self.in_features)) + weight_B = torch.randn((self.out_features, r)) + self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A) + self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B) + if use_rslora: + self.scaling[adapter_name] = lora_alpha / math.sqrt(r) + else: + self.scaling[adapter_name] = lora_alpha / r + + if init_lora_weights == "loftq": + self.loftq_init(adapter_name) + elif init_lora_weights: + self.reset_lora_parameters(adapter_name, init_lora_weights) + + base_layer = 
self.get_base_layer() + weight = getattr(base_layer, "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + self.to(base_layer.weight.device, dtype=weight.dtype) + + self.set_adapter(self.active_adapters) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`list[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self.lora_embedding_A.keys(): + base_layer = self.get_base_layer() + if safe_merge: + # Note that safe_merge will be slower than the normal merge + # because of the copy operation. + orig_weights = base_layer.weight.data.clone() + orig_weights = orig_weights + self.get_delta_weight(active_adapter) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = orig_weights + else: + base_layer.weight.data = base_layer.weight.data + self.get_delta_weight(active_adapter) + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.lora_embedding_A.keys(): + self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) + + def get_delta_weight(self, adapter) -> torch.Tensor: + """ + Compute the delta weight for the given adapter. + + Args: + adapter (str): + The name of the adapter for which the delta weight should be computed. + """ + device = self.lora_embedding_B[adapter].device + dtype = self.lora_embedding_A[adapter].dtype + + # In case users wants to merge the adapter weights that are in + # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to + # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. + cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 + + weight_A = self.lora_embedding_A[adapter] + weight_B = self.lora_embedding_B[adapter] + + if cast_to_fp32: + weight_A = weight_A.float() + weight_B = weight_B.float() + + output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter] + + if cast_to_fp32: + output_tensor = output_tensor.to(dtype=dtype) + + # cast back the weights + self.lora_embedding_A[adapter] = weight_A.to(dtype) + self.lora_embedding_B[adapter] = weight_B.to(dtype) + + return output_tensor + + def _mixed_batch_forward( + self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any + ) -> torch.Tensor: + # This is a special method that handles the case when users pass the argument `adapter_names`. 
This is an + # extra argument that allows mixing different adapters in the same batch at inference time. + result = self.base_layer(x, *args, **kwargs) + + unique_adapters = set(adapter_names) + sub_batch_indices_list = [] + for adapter in unique_adapters: + sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) + + for i, active_adapter in enumerate(unique_adapters): + if active_adapter == "__base__": + continue + if active_adapter not in self.lora_embedding_A.keys(): + continue + + embedding_A = self.lora_embedding_A[active_adapter].T + embedding_B = self.lora_embedding_B[active_adapter].T + scaling = self.scaling[active_adapter] + + # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear + # layer output + sub_batch = x[sub_batch_indices_list[i]] + after_A = self._embed(sub_batch, embedding_A) + result[sub_batch_indices_list[i]] += (after_A @ embedding_B) * scaling + + return result + + def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: + base_layer = self.get_base_layer() + return F.embedding( + input, + weight, + padding_idx=base_layer.padding_idx, + max_norm=base_layer.max_norm, + norm_type=base_layer.norm_type, + scale_grad_by_freq=base_layer.scale_grad_by_freq, + sparse=base_layer.sparse, + ) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + # TODO: no dtype conversion here, unlike in Linear, is that correct? + self._check_forward_args(x, *args, **kwargs) + adapter_names = kwargs.pop("adapter_names", None) + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif adapter_names is not None: + result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + torch_result_dtype = result.dtype + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_embedding_A: + continue + embedding_A = self.lora_embedding_A[active_adapter].T + embedding_B = self.lora_embedding_B[active_adapter].T + scaling = self.scaling[active_adapter] + after_A = self._embed(x, embedding_A) + result = result + (after_A @ embedding_B) * scaling + result = result.to(torch_result_dtype) + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." 
+ rep + + +class Conv2d(nn.Module, LoraLayer): + # Lora implemented in a conv2d layer + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: Union[bool, str] = True, + use_rslora: bool = False, + use_dora: bool = False, + **kwargs, + ) -> None: + super().__init__() + LoraLayer.__init__(self, base_layer) + + self._active_adapter = adapter_name + self.update_layer( + adapter_name, + r, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + init_lora_weights=init_lora_weights, + use_rslora=use_rslora, + use_dora=use_dora, + ) + + def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora): + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") + + self.r[adapter_name] = r + self.lora_alpha[adapter_name] = lora_alpha + if lora_dropout > 0.0: + lora_dropout_layer = nn.Dropout(p=lora_dropout) + else: + lora_dropout_layer = nn.Identity() + + self.lora_dropout[adapter_name] = lora_dropout_layer + # Actual trainable parameters + base_layer = self.get_base_layer() + kernel_size = base_layer.kernel_size + stride = base_layer.stride + padding = base_layer.padding + self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False) + self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False) + if use_rslora: + self.scaling[adapter_name] = lora_alpha / math.sqrt(r) + else: + self.scaling[adapter_name] = lora_alpha / r + + if init_lora_weights == "loftq": + self.loftq_init(adapter_name) + elif init_lora_weights: + self.reset_lora_parameters(adapter_name, init_lora_weights) + + weight = getattr(base_layer, "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + self.to(base_layer.weight.device, dtype=weight.dtype) + + if use_dora: + self.dora_init(adapter_name) + self.use_dora[adapter_name] = True + else: + self.use_dora[adapter_name] = False + + self.set_adapter(self.active_adapters) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + """ + Merge the active adapter weights inside the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`list[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self.lora_A.keys(): + base_layer = self.get_base_layer() + if safe_merge: + # Note that safe_merge will be slower than the normal merge + # because of the copy operation. + orig_weights = base_layer.weight.data.clone() + delta_weight = self.get_delta_weight(active_adapter) + + if not self.use_dora[active_adapter]: + orig_weights = orig_weights + delta_weight + else: + # handle dora + # since delta_weight already includes scaling, set it to 1 here + weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach() + # We need to cache weight_norm because it has to be based on the original weights. 
We + # cannot calculate it on the fly based on the merged weights when unmerging because its a + # different value + self._cache_store(f"{active_adapter}-weight_norm", weight_norm) + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + orig_weights = dora_factor.view(-1, 1, 1, 1) * (orig_weights + delta_weight) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + base_layer.weight.data = orig_weights + else: + delta_weight = self.get_delta_weight(active_adapter) + if not self.use_dora[active_adapter]: + base_layer.weight.data = base_layer.weight.data + delta_weight + else: + # handle dora + # since delta_weight already includes scaling, set it to 1 here + weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach() + # We need to cache weight_norm because it has to be based on the original weights. We + # cannot calculate it on the fly based on the merged weights when unmerging because its a + # different value + self._cache_store(f"{active_adapter}-weight_norm", weight_norm) + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + new_weight = dora_factor.view(-1, 1, 1, 1) * (base_layer.weight.data + delta_weight) + base_layer.weight.data = new_weight + + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.lora_A.keys(): + weight = self.get_base_layer().weight + delta_weight = self.get_delta_weight(active_adapter) + if not self.use_dora[active_adapter]: + weight.data -= delta_weight + else: + weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") + dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm + weight_orig = weight.data / dora_factor.view(-1, 1, 1, 1) - delta_weight + weight.data = weight_orig + + def get_delta_weight(self, adapter) -> torch.Tensor: + """ + Compute the delta weight for the given adapter. + + Args: + adapter (str): + The name of the adapter for which the delta weight should be computed. + """ + device = self.lora_B[adapter].weight.device + dtype = self.lora_A[adapter].weight.dtype + + # In case users wants to merge the adapter weights that are in + # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to + # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. 
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 + + weight_A = self.lora_A[adapter].weight + weight_B = self.lora_B[adapter].weight + + if cast_to_fp32: + weight_A = weight_A.float() + weight_B = weight_B.float() + + # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117 + if self.get_base_layer().weight.size()[2:4] == (1, 1): + # conv2d 1x1 + output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze( + 3 + ) * self.scaling[adapter] + else: + # conv2d 3x3 + output_tensor = ( + F.conv2d( + weight_A.permute(1, 0, 2, 3), + weight_B, + ).permute(1, 0, 2, 3) + * self.scaling[adapter] + ) + + if cast_to_fp32: + output_tensor = output_tensor.to(dtype=dtype) + + # cast back the weights + self.lora_A[adapter].weight.data = weight_A.to(dtype) + self.lora_B[adapter].weight.data = weight_B.to(dtype) + + return output_tensor + + def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor: + # calculate L2 norm of weight matrix, channel-wise + weight = weight + scaling * lora_weight + # the following is needed to have compatibility with the 4D weight tensors of Conv2D + weight_norm = weight.norm(p=2, dim=(1, 2, 3), keepdim=True).transpose(1, 0) + return weight_norm + + def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter): + """ + For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer + output. + """ + base_layer = self.get_base_layer() + weight = base_layer.weight + lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1)) + lora_weight = lora_weight.reshape(weight.shape) + magnitude = self.lora_magnitude_vector[active_adapter] + weight_norm = self._get_weight_norm(weight, lora_weight, scaling) + # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353) + # "[...] we suggest treating ||V +∆V ||_c in + # Eq. (5) as a constant, thereby detaching it from the gradient + # graph. 
This means that while ||V + ∆V ||_c dynamically + # reflects the updates of ∆V , it won’t receive any gradient + # during backpropagation" + weight_norm = weight_norm.detach() + mag_norm_scale = magnitude / weight_norm + result_dora = (mag_norm_scale - 1) * ( + F.conv2d( + x, + weight, + bias=None, + stride=base_layer.stride, + padding=base_layer.padding, + dilation=base_layer.dilation, + groups=base_layer.groups, + ) + ) + mag_norm_scale * lora_B(lora_A(x)) * scaling + + return result_dora + + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + self._check_forward_args(x, *args, **kwargs) + adapter_names = kwargs.pop("adapter_names", None) + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif adapter_names is not None: + result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + torch_result_dtype = result.dtype + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + x = x.to(lora_A.weight.dtype) + + if not self.use_dora[active_adapter]: + result = result + lora_B(lora_A(dropout(x))) * scaling + else: + x = dropout(x) + result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) + + result = result.to(torch_result_dtype) + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." + rep + + +def dispatch_default( + target: torch.nn.Module, + adapter_name: str, + lora_config: LoraConfig, + **kwargs, +) -> Optional[torch.nn.Module]: + new_module = None + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if isinstance(target_base_layer, torch.nn.Embedding): + embedding_kwargs = kwargs.copy() + embedding_kwargs.pop("fan_in_fan_out", None) + embedding_kwargs.update(lora_config.loftq_config) + new_module = Embedding(target, adapter_name, **embedding_kwargs) + elif isinstance(target_base_layer, torch.nn.Conv2d): + kwargs.update(lora_config.loftq_config) + new_module = Conv2d(target, adapter_name, **kwargs) + elif isinstance(target_base_layer, torch.nn.Linear): + if kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " + "Setting fan_in_fan_out to False." + ) + kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False + kwargs.update(lora_config.loftq_config) + new_module = Linear(target, adapter_name, **kwargs) + elif isinstance(target_base_layer, Conv1D): + if not kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." 
+ ) + kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True + kwargs.update(lora_config.loftq_config) + new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs) + + return new_module diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/model.py new file mode 100644 index 0000000000000000000000000000000000000000..3f381efffd7f97c458764c40df9a0feb0ef819ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/model.py @@ -0,0 +1,793 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import math +import operator +import re +import warnings +from contextlib import contextmanager +from dataclasses import asdict, replace +from enum import Enum +from functools import partial, reduce +from itertools import chain +from typing import Literal, Optional + +import torch +from torch import nn +from tqdm import tqdm + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available +from peft.tuners.tuners_utils import ( + BaseTuner, + BaseTunerLayer, + check_target_module_exists, + onload_layer, + replicate_layers, +) +from peft.utils import ( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, + ModulesToSaveWrapper, + _freeze_adapter, + _get_submodules, + get_quantization_config, +) +from peft.utils.merge_utils import dare_linear, dare_ties, magnitude_prune, task_arithmetic, ties + +from .aqlm import dispatch_aqlm +from .awq import dispatch_awq +from .config import LoraConfig +from .gptq import dispatch_gptq +from .layer import Conv2d, LoraLayer, dispatch_default +from .tp_layer import dispatch_megatron + + +def _adapter_names_pre_forward_hook(target, args, kwargs, adapter_names): + # pre-forward hook to inject the adapter_names argument when using mixed adapter batches inference + kwargs["adapter_names"] = adapter_names + return args, kwargs + + +class LoraModel(BaseTuner): + """ + Creates Low Rank Adapter (LoRA) model from a pretrained transformers model. + + The method is described in detail in https://arxiv.org/abs/2106.09685. + + Args: + model ([`torch.nn.Module`]): The model to be adapted. + config ([`LoraConfig`]): The configuration of the Lora model. + adapter_name (`str`): The name of the adapter, defaults to `"default"`. + + Returns: + `torch.nn.Module`: The Lora model. + + Example: + + ```py + >>> from transformers import AutoModelForSeq2SeqLM + >>> from peft import LoraModel, LoraConfig + + >>> config = LoraConfig( + ... task_type="SEQ_2_SEQ_LM", + ... r=8, + ... lora_alpha=32, + ... target_modules=["q", "v"], + ... lora_dropout=0.01, + ... ) + + >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") + >>> lora_model = LoraModel(model, config, "default") + ``` + + ```py + >>> import torch + >>> import transformers + >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training + + >>> rank = ... 
+ >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"] + >>> config = LoraConfig( + ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM" + ... ) + >>> quantization_config = transformers.BitsAndBytesConfig(load_in_8bit=True) + + >>> tokenizer = transformers.AutoTokenizer.from_pretrained( + ... "kakaobrain/kogpt", + ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b + ... bos_token="[BOS]", + ... eos_token="[EOS]", + ... unk_token="[UNK]", + ... pad_token="[PAD]", + ... mask_token="[MASK]", + ... ) + >>> model = transformers.GPTJForCausalLM.from_pretrained( + ... "kakaobrain/kogpt", + ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b + ... pad_token_id=tokenizer.eos_token_id, + ... use_cache=False, + ... device_map={"": rank}, + ... torch_dtype=torch.float16, + ... quantization_config=quantization_config, + ... ) + >>> model = prepare_model_for_kbit_training(model) + >>> lora_model = get_peft_model(model, config) + ``` + + **Attributes**: + - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. + - **peft_config** ([`LoraConfig`]): The configuration of the Lora model. + """ + + prefix: str = "lora_" + + def __init__(self, model, config, adapter_name) -> None: + super().__init__(model, config, adapter_name) + + def _check_new_adapter_config(self, config: LoraConfig) -> None: + """ + A helper method to check the config when a new adapter is being added. + + Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. + + """ + # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check + # does not fully correspond to the error message. + if (len(self.peft_config) > 1) and (config.bias != "none"): + raise ValueError( + f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " + "set bias to 'none' for all adapters." + ) + + @staticmethod + def _check_target_module_exists(lora_config, key): + return check_target_module_exists(lora_config, key) + + def _prepare_model(self, peft_config: LoraConfig, model: nn.Module): + r""" + A private method to modify the model structure before adapter is applied. + + Args: + peft_config (`PeftConfig`): + The prepared adapter config. + model (`nn.Module`): + The model that is going to be adapted. 
+ """ + if peft_config.layer_replication: + replicate_layers(model, peft_config.layer_replication) + + def _create_and_replace( + self, + lora_config, + adapter_name, + target, + target_name, + parent, + current_key, + ): + if current_key is None: + raise ValueError("Current Key shouldn't be `None`") + + # Regexp matching - Find key which matches current target_name in patterns provided + pattern_keys = list(chain(lora_config.rank_pattern.keys(), lora_config.alpha_pattern.keys())) + target_name_key = next(filter(lambda key: re.match(rf".*\.{key}$", current_key), pattern_keys), current_key) + r = lora_config.rank_pattern.get(target_name_key, lora_config.r) + alpha = lora_config.alpha_pattern.get(target_name_key, lora_config.lora_alpha) + + kwargs = { + "r": r, + "lora_alpha": alpha, + "lora_dropout": lora_config.lora_dropout, + "fan_in_fan_out": lora_config.fan_in_fan_out, + "init_lora_weights": lora_config.init_lora_weights, + "use_rslora": lora_config.use_rslora, + "use_dora": lora_config.use_dora, + "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), + "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), + } + + quant_methods = ["gptq", "aqlm", "awq"] + for quant_method in quant_methods: + quantization_config = get_quantization_config(self.model, method=quant_method) + if quantization_config is not None: + kwargs[f"{quant_method}_quantization_config"] = quantization_config + + # note: AdaLoraLayer is a subclass of LoraLayer, we need to exclude it + from peft.tuners.adalora import AdaLoraLayer + + if isinstance(target, LoraLayer) and not isinstance(target, AdaLoraLayer): + target.update_layer( + adapter_name, + r, + lora_alpha=alpha, + lora_dropout=lora_config.lora_dropout, + init_lora_weights=lora_config.init_lora_weights, + use_rslora=lora_config.use_rslora, + use_dora=lora_config.use_dora, + ) + else: + new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs) + if adapter_name != self.active_adapter: + # adding an additional adapter: it is not automatically trainable + new_module.requires_grad_(False) + self._replace_module(parent, target_name, new_module, target) + + def _replace_module(self, parent, child_name, new_module, child): + setattr(parent, child_name, new_module) + # It's not necessary to set requires_grad here, as that is handled by + # _mark_only_adapters_as_trainable + + # child layer wraps the original module, unpack it + if hasattr(child, "base_layer"): + child = child.base_layer + + if not hasattr(new_module, "base_layer"): + new_module.weight = child.weight + if hasattr(child, "bias"): + new_module.bias = child.bias + + if getattr(child, "state", None) is not None: + if hasattr(new_module, "base_layer"): + new_module.base_layer.state = child.state + else: + new_module.state = child.state + new_module.to(child.weight.device) + + # dispatch to correct device + for name, module in new_module.named_modules(): + if (self.prefix in name) or ("ranknum" in name): + weight = child.qweight if hasattr(child, "qweight") else child.weight + module.to(weight.device) + + def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: + for n, p in model.named_parameters(): + if self.prefix not in n: + p.requires_grad = False + + for active_adapter in self.active_adapters: + bias = self.peft_config[active_adapter].bias + if bias == "none": + continue + + if bias == "all": + for n, p in model.named_parameters(): + if "bias" in n: + p.requires_grad = True + elif bias == "lora_only": + for m in model.modules(): + if 
isinstance(m, LoraLayer) and hasattr(m, "bias") and m.bias is not None: + m.bias.requires_grad = True + else: + raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") + + @staticmethod + def _create_new_module(lora_config, adapter_name, target, **kwargs): + # Collect dispatcher functions to decide what backend to use for the replaced LoRA layer. The order matters, + # because the first match is always used. Therefore, the default layers should be checked last. + dispatchers = [] + + # avoid eager bnb import + if is_bnb_available(): + from .bnb import dispatch_bnb_8bit + + dispatchers.append(dispatch_bnb_8bit) + + if is_bnb_4bit_available(): + from .bnb import dispatch_bnb_4bit + + dispatchers.append(dispatch_bnb_4bit) + + dispatchers.extend([dispatch_aqlm, dispatch_awq, dispatch_gptq, dispatch_megatron, dispatch_default]) + + new_module = None + for dispatcher in dispatchers: + new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs) + if new_module is not None: # first match wins + break + + if new_module is None: + # no module could be matched + raise ValueError( + f"Target module {target} is not supported. Currently, only the following modules are supported: " + "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `transformers.pytorch_utils.Conv1D`." + ) + + return new_module + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + def get_peft_config_as_dict(self, inference: bool = False): + config_dict = {} + for key, value in self.peft_config.items(): + config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} + if inference: + config["inference_mode"] = True + config_dict[key] = config + return config + + def _set_adapter_layers(self, enabled: bool = True) -> None: + for module in self.model.modules(): + if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): + module.enable_adapters(enabled) + + def enable_adapter_layers(self) -> None: + """Enable all adapters. + + Call this if you have previously disabled all adapters and want to re-enable them. + """ + self._set_adapter_layers(enabled=True) + + def disable_adapter_layers(self) -> None: + """Disable all adapters. + + When disabling all adapters, the model output corresponds to the output of the base model. + """ + for active_adapter in self.active_adapters: + val = self.peft_config[active_adapter].bias + if val != "none": + msg = ( + f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " + "output as the the base model would without adaption." + ) + warnings.warn(msg) + self._set_adapter_layers(enabled=False) + + def set_adapter(self, adapter_name: str | list[str]) -> None: + """Set the active adapter(s). + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. + """ + for module in self.model.modules(): + if isinstance(module, LoraLayer): + if module.merged: + warnings.warn("Adapter cannot be set when the model is merged. 
Unmerging the model first.") + module.unmerge() + module.set_adapter(adapter_name) + self.active_adapter = adapter_name + + @contextmanager + def _enable_peft_forward_hooks(self, *args, **kwargs): + # If adapter_names is passed as an argument, we inject it into the forward arguments. + adapter_names = kwargs.pop("adapter_names", None) + if adapter_names is None: + # nothing to do + yield + return + + if self.training: + raise ValueError("Cannot pass `adapter_names` when the model is in training mode.") + + hook_handles = [] + for module in self.modules(): + if isinstance(module, LoraLayer): + pre_forward = partial(_adapter_names_pre_forward_hook, adapter_names=adapter_names) + handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True) + hook_handles.append(handle) + + yield + + for handle in hook_handles: + handle.remove() + + def _check_merge_allowed(self): + """Verify that the configuration supports merging. + + Currently gptq quantization and replicated layers do not support merging. + """ + if getattr(self.model, "quantization_method", None) == "gptq": + raise ValueError("Cannot merge LORA layers when the model is gptq quantized") + if self.peft_config.get("layer_replication"): + raise ValueError("Cannot merge LORA layers when base model layers are replicated") + + @staticmethod + def _prepare_adapter_config(peft_config, model_config): + if peft_config.target_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: + raise ValueError("Please specify `target_modules` in `peft_config`") + peft_config.target_modules = set( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] + ) + return peft_config + + def _unload_and_optionally_merge( + self, + merge=True, + progressbar: bool = False, + safe_merge: bool = False, + adapter_names: Optional[list[str]] = None, + ): + if merge: + self._check_merge_allowed() + + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + desc = "Unloading " + ("and merging " if merge else "") + "model" + for key in tqdm(key_list, disable=not progressbar, desc=desc): + try: + parent, target, target_name = _get_submodules(self.model, key) + except AttributeError: + continue + with onload_layer(target): + if hasattr(target, "base_layer"): + if merge: + target.merge(safe_merge=safe_merge, adapter_names=adapter_names) + self._replace_module(parent, target_name, target.get_base_layer(), target) + elif isinstance(target, ModulesToSaveWrapper): + # save any additional trainable modules part of `modules_to_save` + new_module = target.modules_to_save[target.active_adapter] + if hasattr(new_module, "base_layer"): + # check if the module is itself a tuner layer + if merge: + new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) + new_module = new_module.get_base_layer() + setattr(parent, target_name, new_module) + + return self.model + + def add_weighted_adapter( + self, + adapters, + weights, + adapter_name, + combination_type="svd", + svd_rank=None, + svd_clamp=None, + svd_full_matrices=True, + svd_driver=None, + density=None, + majority_sign_method: Literal["total", "frequency"] = "total", + ) -> None: + """ + This method adds a new adapter by merging the given adapters with the given weights. + + When using the `cat` combination_type you should be aware that rank of the resulting adapter will be equal to + the sum of all adapters ranks. So it's possible that the mixed adapter may become too big and result in OOM + errors. 
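+
+        For example, a TIES merge of two equal-rank adapters could look like the sketch below (the adapter
+        names, weights and density are illustrative assumptions):
+
+        ```py
+        >>> model.load_adapter("path/to/adapter_b", adapter_name="adapter_b")  # "default" is already loaded
+        >>> model.add_weighted_adapter(
+        ...     adapters=["default", "adapter_b"],
+        ...     weights=[0.7, 0.3],
+        ...     adapter_name="merged",
+        ...     combination_type="ties",
+        ...     density=0.2,
+        ... )
+        >>> model.set_adapter("merged")
+        ```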
+ + Args: + adapters (`list`): + List of adapter names to be merged. + weights (`list`): + List of weights for each adapter. + adapter_name (`str`): + Name of the new adapter. + combination_type (`str`): + The merging type can be one of [`svd`, `linear`, `cat`, `ties`, `ties_svd`, `dare_ties`, `dare_linear`, + `dare_ties_svd`, `dare_linear_svd`, `magnitude_prune`, `magnitude_prune_svd`]. When using the `cat` + combination_type, the rank of the resulting adapter is equal to the sum of all adapters ranks (the + mixed adapter may be too big and result in OOM errors). + svd_rank (`int`, *optional*): + Rank of output adapter for svd. If None provided, will use max rank of merging adapters. + svd_clamp (`float`, *optional*): + A quantile threshold for clamping SVD decomposition output. If None is provided, do not perform + clamping. Defaults to None. + svd_full_matrices (`bool`, *optional*): + Controls whether to compute the full or reduced SVD, and consequently, the shape of the returned + tensors U and Vh. Defaults to True. + svd_driver (`str`, *optional*): + Name of the cuSOLVER method to be used. This keyword argument only works when merging on CUDA. Can be + one of [None, `gesvd`, `gesvdj`, `gesvda`]. For more info please refer to `torch.linalg.svd` + documentation. Defaults to None. + density (`float`, *optional*): + Value between 0 and 1. 0 means all values are pruned and 1 means no values are pruned. Should be used + with [`ties`, `ties_svd`, `dare_ties`, `dare_linear`, `dare_ties_svd`, `dare_linear_svd`, + `magnintude_prune`, `magnitude_prune_svd`] + majority_sign_method (`str`): + The method, should be one of ["total", "frequency"], to use to get the magnitude of the sign values. + Should be used with [`ties`, `ties_svd`, `dare_ties`, `dare_ties_svd`] + """ + + if adapter_name in list(self.peft_config.keys()): + return + for adapter in adapters: + if adapter not in list(self.peft_config.keys()): + raise ValueError(f"Adapter {adapter} does not exist") + + # if there is only one adapter, we can only use linear merging + combination_type = "linear" if len(adapters) == 1 else combination_type + + adapters_ranks = [self.peft_config[adapter].r for adapter in adapters] + if combination_type in ("linear", "ties", "dare_ties", "dare_linear", "magnitude_prune"): + # all adapters ranks should be same, new rank is just this value + if len(set(adapters_ranks)) != 1: + raise ValueError( + "All adapters must have the same r value when using combination_type linear, ties, dare_ties or dare_linear." + ) + new_rank = adapters_ranks[0] + elif combination_type == "cat": + # adapters ranks may be different, new rank is sum of all ranks + # be careful, because output adapter rank may be really big if mixing a lot of adapters + new_rank = sum(adapters_ranks) + elif combination_type.endswith("svd"): + # new rank is the max of all ranks of the adapters if not provided + new_rank = svd_rank or max(adapters_ranks) + else: + raise ValueError(f"Invalid combination_type: {combination_type}") + + target_module_types = [type(self.peft_config[adapter].target_modules) for adapter in adapters] + if not target_module_types: + raise ValueError(f"Found no adapter matching the names in {adapters}") + if len(set(target_module_types)) > 1: + raise ValueError( + "all adapter configs should follow the same target modules type. " + "Combining adapters with `target_modules` type being a mix of list/set and string is not supported." 
+ ) + + if target_module_types[0] == str: + new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters) + elif target_module_types[0] == set: + new_target_modules = reduce( + operator.or_, (self.peft_config[adapter].target_modules for adapter in adapters) + ) + else: + raise TypeError(f"Invalid type {target_module_types[0]} found in target_modules") + + self.peft_config[adapter_name] = replace( + self.peft_config[adapters[0]], + r=new_rank, + lora_alpha=new_rank, + target_modules=new_target_modules, + ) + self.inject_adapter(self.model, adapter_name) + + # Do we really need that? + _freeze_adapter(self.model, adapter_name) + + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + for key in key_list: + _, target, _ = _get_submodules(self.model, key) + if isinstance(target, LoraLayer): + if adapter_name in target.lora_A: + target_lora_A = target.lora_A[adapter_name].weight + target_lora_B = target.lora_B[adapter_name].weight + elif adapter_name in target.lora_embedding_A: + target_lora_A = target.lora_embedding_A[adapter_name] + target_lora_B = target.lora_embedding_B[adapter_name] + else: + continue + + target_lora_A.data = target_lora_A.data * 0.0 + target_lora_B.data = target_lora_B.data * 0.0 + if combination_type == "cat": + loras_A, loras_B = [], [] + for adapter, weight in zip(adapters, weights): + if adapter in target.lora_A: + current_adapter_lora_A = target.lora_A[adapter].weight + current_adapter_lora_B = target.lora_B[adapter].weight + elif adapter in target.lora_embedding_A: + current_adapter_lora_A = target.lora_embedding_A[adapter] + current_adapter_lora_B = target.lora_embedding_B[adapter] + else: + continue + loras_A.append(current_adapter_lora_A.data * weight * target.scaling[adapter]) + loras_B.append(current_adapter_lora_B.data) + + if len(loras_A) == 0: + raise ValueError("No matching LoRAs found. Please raise an issue on GitHub.") + loras_A = torch.cat(loras_A, dim=0) + loras_B = torch.cat(loras_B, dim=1) + target_lora_A.data[: loras_A.shape[0], :] = loras_A + target_lora_B.data[:, : loras_B.shape[1]] = loras_B + elif combination_type in [ + "svd", + "ties_svd", + "dare_linear_svd", + "dare_ties_svd", + "magnitude_prune_svd", + ]: + target_lora_A.data, target_lora_B.data = self._svd_generalized_task_arithmetic_weighted_adapter( + combination_type, + adapters, + weights, + new_rank, + target, + target_lora_A, + target_lora_B, + density, + majority_sign_method, + svd_clamp, + full_matrices=svd_full_matrices, + driver=svd_driver, + ) + elif combination_type in ["linear", "ties", "dare_linear", "dare_ties", "magnitude_prune"]: + target_lora_A.data, target_lora_B.data = self._generalized_task_arithmetic_weighted_adapter( + combination_type, adapters, weights, target, density, majority_sign_method + ) + + def _svd_generalized_task_arithmetic_weighted_adapter( + self, + combination_type, + adapters, + weights, + new_rank, + target, + target_lora_A, + target_lora_B, + density, + majority_sign_method, + clamp=None, + full_matrices=True, + driver=None, + ): + valid_adapters = [] + valid_weights = [] + is_embedding = any(adapter in target.lora_embedding_A for adapter in adapters) + for adapter, weight in zip(adapters, weights): + if adapter in target.lora_A or adapter in target.lora_embedding_A: + valid_adapters.append(adapter) + valid_weights.append(weight * target.scaling[adapter]) + + # if no valid adapter, nothing to do + if len(valid_adapters) == 0: + raise ValueError("No matching LoRAs found. 
Please raise an issue on Github.") + delta_weight = [target.get_delta_weight(adapter) for adapter in valid_adapters] + valid_weights = torch.tensor(valid_weights).to(delta_weight[0].device) + if combination_type == "svd": + delta_weight = task_arithmetic(delta_weight, valid_weights) + elif combination_type == "ties_svd": + delta_weight = ties(delta_weight, valid_weights, density, majority_sign_method) + elif combination_type == "dare_linear_svd": + delta_weight = dare_linear(delta_weight, valid_weights, density) + elif combination_type == "dare_ties_svd": + delta_weight = dare_ties(delta_weight, valid_weights, density, majority_sign_method) + elif combination_type == "magnitude_prune_svd": + delta_weight = magnitude_prune(delta_weight, valid_weights, density) + else: + raise ValueError(f"Invalid value passed to combination type: {combination_type}") + + conv2d = isinstance(target, Conv2d) + if conv2d: + conv2d_1x1 = target.weight.size()[2:4] == (1, 1) + if not conv2d_1x1: + delta_weight = delta_weight.flatten(start_dim=1) + else: + delta_weight = delta_weight.squeeze() + if (hasattr(target, "fan_in_fan_out") and target.fan_in_fan_out) or is_embedding: + delta_weight = delta_weight.T + + # based on https://github.com/kohya-ss/sd-scripts/blob/main/networks/svd_merge_lora.py#L114-L131 + U, S, Vh = torch.linalg.svd(delta_weight, full_matrices=full_matrices, driver=driver) + U = U[:, :new_rank] + S = S[:new_rank] + U = U @ torch.diag(S) + Vh = Vh[:new_rank, :] + if clamp is not None: + dist = torch.cat([U.flatten(), Vh.flatten()]) + hi_val = torch.quantile(dist, clamp) + low_val = -hi_val + U = U.clamp(low_val, hi_val) + Vh = Vh.clamp(low_val, hi_val) + if conv2d: + U = U.reshape(target_lora_B.data.shape) + Vh = Vh.reshape(target_lora_A.data.shape) + return Vh, U + + def _generalized_task_arithmetic_weighted_adapter( + self, + combination_type, + adapters, + weights, + target, + density, + majority_sign_method, + ): + # account weights for LoRA A and B layers. 
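+        # The delta of a single adapter is scaling * B @ A, so the combined weight * scaling factor is
+        # split as a square root between the A and the B factor before the two lists are combined below.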
+ valid_weights = [] + lora_A_deltas = [] + lora_B_deltas = [] + for adapter, weight in zip(adapters, weights): + if adapter in target.lora_A: + current_adapter_lora_A = target.lora_A[adapter].weight + current_adapter_lora_B = target.lora_B[adapter].weight + elif adapter in target.lora_embedding_A: + current_adapter_lora_A = target.lora_embedding_A[adapter] + current_adapter_lora_B = target.lora_embedding_B[adapter] + else: + continue + valid_weights.append(math.sqrt(weight * target.scaling[adapter])) + lora_A_deltas.append(current_adapter_lora_A.data) + lora_B_deltas.append(current_adapter_lora_B.data) + valid_weights = torch.tensor(valid_weights).to(lora_A_deltas[0].device) + lora_deltas = [lora_A_deltas, lora_B_deltas] + dtype = lora_A_deltas[0].dtype + for i, task_tensors in enumerate(lora_deltas): + if combination_type == "linear": + lora_deltas[i] = task_arithmetic(task_tensors, valid_weights) + elif combination_type == "ties": + lora_deltas[i] = ties(task_tensors, valid_weights, density, majority_sign_method) + elif combination_type == "dare_linear": + lora_deltas[i] = dare_linear(task_tensors, valid_weights, density) + elif combination_type == "dare_ties": + lora_deltas[i] = dare_ties(task_tensors, valid_weights, density, majority_sign_method) + elif combination_type == "magnitude_prune": + lora_deltas[i] = magnitude_prune(task_tensors, valid_weights, density) + else: + raise ValueError("Invalid combination type") + lora_deltas = [delta.to(dtype) for delta in lora_deltas] + return lora_deltas + + def delete_adapter(self, adapter_name: str) -> None: + """ + Deletes an existing adapter. + + Args: + adapter_name (str): Name of the adapter to be deleted. + """ + if adapter_name not in list(self.peft_config.keys()): + raise ValueError(f"Adapter {adapter_name} does not exist") + del self.peft_config[adapter_name] + + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + new_adapter = None + for key in key_list: + _, target, _ = _get_submodules(self.model, key) + if isinstance(target, LoraLayer): + target.delete_adapter(adapter_name) + if new_adapter is None: + new_adapter = target.active_adapters[:] + + self.active_adapter = new_adapter or [] + + def merge_and_unload( + self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None + ) -> torch.nn.Module: + r""" + This method merges the LoRa layers into the base model. This is needed if someone wants to use the base model + as a standalone model. + + Args: + progressbar (`bool`): + whether to show a progressbar indicating the unload and merge process + safe_merge (`bool`): + whether to activate the safe merging check to check if there is any potential Nan in the adapter + weights + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + Example: + + ```py + >>> from transformers import AutoModelForCausalLM + >>> from peft import PeftModel + + >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") + >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" + >>> model = PeftModel.from_pretrained(base_model, peft_model_id) + >>> merged_model = model.merge_and_unload() + ``` + """ + return self._unload_and_optionally_merge( + progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names + ) + + def unload(self) -> torch.nn.Module: + """ + Gets back the base model by removing all the lora modules without merging. 
This gives back the original base + model. + """ + return self._unload_and_optionally_merge(merge=False) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/tp_layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/tp_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..f98b42c15f0a0fa929c2dd13a94a8ffc19bf9467 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/tp_layer.py @@ -0,0 +1,230 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import warnings +from typing import Any, Optional + +import torch +import torch.nn as nn +import torch.nn.init as init + +from peft.tuners.tuners_utils import BaseTunerLayer + +from .layer import LoraLayer + + +class LoraParallelLinear(nn.Module, LoraLayer): + """ + When the target layer parallel_linear is RowParallelLinear, in order to keep the input and output shapes + consistent, we need to split the lora matrix A into rows, and the lora_B at this time should be a complete linear + layer; In the same way, when the target layer is ColumnParallelLinear, we perform column segmentation on lora_B, + while lora_A is still a complete linear layer. + """ + + def __init__( + self, + base_layer, + adapter_name: str, + backend, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + fan_in_fan_out: bool = False, + init_lora_weights: bool = True, + use_rslora: bool = False, + use_dora: bool = False, + **kwargs, + ): + super().__init__() + LoraLayer.__init__(self, base_layer=base_layer) + + if use_dora: + raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") + + self.backend = backend + self.is_parallel_a = isinstance(base_layer, backend.RowParallelLinear) + self.fan_in_fan_out = fan_in_fan_out + self._active_adapter = adapter_name + + megatron_config = kwargs["megatron_config"] + parallel_linear_kwargs = {"megatron_config": megatron_config} + init_method = init.xavier_normal_ + if hasattr(megatron_config, "init_method"): + init_method = megatron_config.init_method + input_is_parallel = True + gather_output = False + if isinstance(base_layer, self.backend.RowParallelLinear): + input_is_parallel = base_layer.input_is_parallel + else: + gather_output = base_layer.gather_output + self.update_layer( + adapter_name, + r, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + init_lora_weights=init_lora_weights, + use_rslora=use_rslora, + use_dora=use_dora, + init_method=init_method, + input_is_parallel=input_is_parallel, + gather_output=gather_output, + **parallel_linear_kwargs, + ) + + self.is_target_conv_1d_layer = False + + def update_layer( + self, + adapter_name, + r, + lora_alpha, + lora_dropout, + init_lora_weights, + use_rslora, + use_dora=False, + init_method=init.xavier_normal_, + input_is_parallel=True, + gather_output=False, + **parallel_linear_kwargs, + ): + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is 
{r}") + self.r[adapter_name] = r + self.lora_alpha[adapter_name] = lora_alpha + if lora_dropout > 0.0: + lora_dropout_layer = nn.Dropout(p=lora_dropout) + else: + lora_dropout_layer = nn.Identity() + + self.lora_dropout[adapter_name] = lora_dropout_layer + + megatron_config = parallel_linear_kwargs["megatron_config"] + # lora needs to be forced to upgrade to 32-bit precision, otherwise it will overflow + megatron_config.params_dtype = torch.float32 + if self.is_parallel_a: + lora_a = self.backend.RowParallelLinear( + input_size=self.in_features, + output_size=r, + bias=False, + input_is_parallel=input_is_parallel, + skip_bias_add=True, + init_method=init_method, + config=megatron_config, + ) + lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32) + else: + lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32) + lora_b = self.backend.ColumnParallelLinear( + input_size=r, + output_size=self.out_features, + bias=False, + gather_output=gather_output, + init_method=init_method, + config=megatron_config, + ) + self.lora_A[adapter_name] = lora_a + self.lora_B[adapter_name] = lora_b + if use_rslora: + self.scaling[adapter_name] = lora_alpha / (r**0.5) + else: + self.scaling[adapter_name] = lora_alpha / r + if init_lora_weights: + self.reset_lora_parameters(adapter_name, init_lora_weights) + + weight = getattr(self.get_base_layer(), "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + if weight.dtype.is_floating_point or weight.dtype.is_complex: + self.to(weight.device, dtype=weight.dtype) + else: + self.to(weight.device) + self.set_adapter(self.active_adapters) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any): + previous_dtype = x.dtype + # If weight is used for matrix multiplication here, the final aggregation operation of the original + # parallel_linear layer will be missing, so we need to directly call its forward function to obtain the + # output of the original parallel_linear layer. 
+ if self.disable_adapters: + if self.merged: + self.unmerge() + result, bias = self.base_layer(x, *args, **kwargs) + elif self.merged: + result, bias = self.base_layer(x, *args, **kwargs) + else: + result, bias = self.base_layer(x, *args, **kwargs) + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + x = x.to(lora_A.weight.dtype) + + lora_result = lora_A(dropout(x)) + if isinstance(lora_result, tuple): + lora_result = lora_result[0] + lora_result = lora_B(lora_result) + if isinstance(lora_result, tuple): + lora_result = lora_result[0] + lora_result = lora_result * scaling + + result = result + lora_result + + result = result.to(previous_dtype) + return result, bias + + +def dispatch_megatron( + target: torch.nn.Module, + adapter_name: str, + lora_config, + **kwargs: Any, +) -> Optional[torch.nn.Module]: + new_module = None + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if lora_config.megatron_config: + megatron_core = importlib.import_module(lora_config.megatron_core) + else: + megatron_core = None + + if megatron_core and isinstance( + target_base_layer, + (megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear), + ): + megatron_kwargs = kwargs.copy() + megatron_config = lora_config.megatron_config + if isinstance(megatron_config, dict): + transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig + megatron_config = transformer_config_class(**lora_config.megatron_config) + megatron_kwargs["megatron_config"] = megatron_config + if megatron_kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` " + "or `RowParallelLinear`. " + "Setting fan_in_fan_out to False." + ) + megatron_kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False + new_module = LoraParallelLinear( + base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs + ) + + return new_module diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..39c750ae8e8c2c8ac567f76c6ea70b638f29cb24 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py @@ -0,0 +1,428 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import warnings +from abc import abstractmethod +from dataclasses import dataclass, field +from typing import Any, Optional, Union + +import torch +import torch.nn as nn +from tqdm import tqdm + +from peft.config import PeftConfig +from peft.utils import ( + ModulesToSaveWrapper, + _get_submodules, +) + +from .tuners_utils import BaseTuner, BaseTunerLayer, check_adapters_to_merge, check_target_module_exists + + +@dataclass +class LycorisConfig(PeftConfig): + r""" + A base config for LyCORIS like adapters + """ + + rank_pattern: Optional[dict] = field( + default_factory=dict, + metadata={ + "help": ( + "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " + "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" + ) + }, + ) + alpha_pattern: Optional[dict] = field( + default_factory=dict, + metadata={ + "help": ( + "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. " + "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" + ) + }, + ) + + +class LycorisLayer(BaseTunerLayer): + r""" + A base layer for LyCORIS like adapters + """ + + # adapter_layer_names needs to be defined on the child class + other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout") + + def __init__(self, base_layer: nn.Module) -> None: + self.base_layer = base_layer + self.r = {} + self.alpha = {} + self.scaling = {} + self.rank_dropout = {} + self.module_dropout = {} + + # Tuner info + self._disable_adapters = False + self.merged_adapters = [] + + @property + @abstractmethod + def _available_adapters(self) -> set[str]: + ... + + def _init_empty_weights(self, cls, *args, **kwargs) -> None: + # A helper method that allows to initialize the layer of the given class without spending time to initialize the + # model weights. The implementation is inspired by + # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but this function cannot be used + # directly. + # Instead of this approach, it would be possible to bypass the __init__ of the class but that runs the risk of + # omitting important logic inside that __init__. + kwargs = kwargs.copy() + final_device = kwargs.pop("device", "cpu") + cls.__init__(self, *args, device="meta", **kwargs) + self.to_empty(device=final_device) + + @abstractmethod + def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs): + ... + + # TODO: refactor LoRA to use the same approach + @abstractmethod + def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + """Activations added on top of the base layer output (i.e. after the base layer forward pass)""" + + @abstractmethod + def get_delta_weight(self, adapter_name: str) -> torch.Tensor: + ... + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If `None`, all active adapters will be merged. + Defaults to `None`. 
+ """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self._available_adapters: + base_layer = self.get_base_layer() + if safe_merge: + orig_weights = base_layer.weight.data.clone() + orig_weights += self.get_delta_weight(active_adapter) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = orig_weights + else: + base_layer.weight.data += self.get_delta_weight(active_adapter) + self.merged_adapters.append(active_adapter) + + @abstractmethod + def reset_adapter_parameters(self, adapter_name: str): + ... + + def set_scale(self, adapter, scale): + if adapter not in self._available_adapters: + # Ignore the case where the adapter is not in the layer + return + self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter] + + def scale_layer(self, scale: float) -> None: + if scale == 1: + return + + for active_adapter in self.active_adapters: + if active_adapter not in self._available_adapters: + continue + + self.scaling[active_adapter] *= scale + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self._available_adapters: + self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) + + def unscale_layer(self, scale=None) -> None: + for active_adapter in self.active_adapters: + if active_adapter not in self._available_adapters: + continue + + if scale is None: + self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter] + else: + self.scaling[active_adapter] /= scale + + @abstractmethod + def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs): + ... + + +class LycorisTuner(BaseTuner): + r""" + A base tuner for LyCORIS like adapters + """ + + prefix: str + layers_mapping: dict[type[torch.nn.Module], type[LycorisLayer]] + + def __init__(self, model, config, adapter_name): + super().__init__(model, config, adapter_name) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + @staticmethod + def _check_target_module_exists(config, key): + return check_target_module_exists(config, key) + + @abstractmethod + def _create_and_replace( + self, + config: LycorisConfig, + adapter_name: str, + target: Union[LycorisLayer, nn.Module], + target_name, + parent, + current_key, + ): + ... 
+ + @classmethod + def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer: + # Find corresponding subtype of provided target module + new_module_cls = None + for subtype, target_cls in cls.layers_mapping.items(): + if ( + hasattr(target, "base_layer") + and isinstance(target.get_base_layer(), subtype) + and isinstance(target, BaseTunerLayer) + ): + # nested tuner layers are allowed + new_module_cls = target_cls + break + elif isinstance(target, subtype): + new_module_cls = target_cls + break + + # We didn't find corresponding type, so adapter for this layer is not supported + if new_module_cls is None: + supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) + raise ValueError( + f"Target module of type {type(target)} not supported, " + f"currently only adapters for {supported_modules} are supported" + ) + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if isinstance(target_base_layer, torch.nn.Conv2d): + new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) + elif isinstance(target_base_layer, torch.nn.Linear): + new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) + else: + supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) + raise ValueError( + f"Target module of type {type(target)} not supported, " + f"currently only adapters for {supported_modules} are supported" + ) + + return new_module + + def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: + for n, p in model.named_parameters(): + if self.prefix not in n: + p.requires_grad = False + + @staticmethod + def _prepare_adapter_config(peft_config, model_config): + if peft_config.target_modules is None: + raise ValueError("Please specify `target_modules` in `peft_config`") + return peft_config + + def _replace_module(self, parent, child_name, new_module, child): + setattr(parent, child_name, new_module) + # It's not necessary to set requires_grad here, as that is handled by + # _mark_only_adapters_as_trainable + + if not hasattr(new_module, "base_layer"): + new_module.weight = child.weight + if hasattr(child, "bias"): + new_module.bias = child.bias + + if getattr(child, "state", None) is not None: + if hasattr(new_module, "base_layer"): + new_module.base_layer.state = child.state + else: + new_module.state = child.state + new_module.to(child.weight.device) + + # dispatch to correct device + for name, module in new_module.named_modules(): + if self.prefix in name: + module.to(child.weight.device) + + def _set_adapter_layers(self, enabled=True): + for module in self.model.modules(): + if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): + module.enable_adapters(enabled) + + def _unload_and_optionally_merge( + self, + merge: bool = True, + progressbar: bool = False, + safe_merge: bool = False, + adapter_names: Optional[list[str]] = None, + ): + if merge: + if getattr(self.model, "quantization_method", None) == "gptq": + raise ValueError("Cannot merge LOHA layers when the model is gptq quantized") + + self._unloading_checks(adapter_names) + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + desc = "Unloading " + ("and merging " if merge else "") + "model" + for key in tqdm(key_list, disable=not progressbar, desc=desc): + try: + parent, target, target_name = _get_submodules(self.model, key) + except AttributeError: + continue + + if 
hasattr(target, "base_layer"): + if merge: + target.merge(safe_merge=safe_merge, adapter_names=adapter_names) + self._replace_module(parent, target_name, target.get_base_layer(), target) + elif isinstance(target, ModulesToSaveWrapper): + # save any additional trainable modules part of `modules_to_save` + new_module = target.modules_to_save[target.active_adapter] + if hasattr(new_module, "base_layer"): + # check if the module is itself a tuner layer + if merge: + new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) + new_module = new_module.get_base_layer() + setattr(parent, target_name, new_module) + + return self.model + + def enable_adapter_layers(self) -> None: + """Enable all adapters. + + Call this if you have previously disabled all adapters and want to re-enable them. + """ + self._set_adapter_layers(enabled=True) + + def disable_adapter_layers(self) -> None: + """Disable all adapters. + + When disabling all adapters, the model output corresponds to the output of the base model. + """ + self._set_adapter_layers(enabled=False) + + def merge_and_unload( + self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None + ) -> torch.nn.Module: + r""" + This method merges the adapter layers into the base model. This is needed if someone wants to use the base + model as a standalone model. + + Args: + progressbar (`bool`): + whether to show a progressbar indicating the unload and merge process + safe_merge (`bool`): + whether to activate the safe merging check to check if there is any potential Nan in the adapter + weights + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + + """ + return self._unload_and_optionally_merge( + progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names + ) + + def unload(self) -> torch.nn.Module: + """ + Gets back the base model by removing all the lora modules without merging. This gives back the original base + model. + """ + return self._unload_and_optionally_merge(merge=False) + + def set_adapter(self, adapter_name: str | list[str]) -> None: + """Set the active adapter(s). + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. + """ + for module in self.model.modules(): + if isinstance(module, LycorisLayer): + if module.merged: + warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") + module.unmerge() + module.set_adapter(adapter_name) + + def delete_adapter(self, adapter_name: str) -> None: + """ + Deletes an existing adapter. + + Args: + adapter_name (`str`): Name of the adapter to be deleted. 
+ """ + if adapter_name not in list(self.peft_config.keys()): + raise ValueError(f"Adapter {adapter_name} does not exist") + del self.peft_config[adapter_name] + + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + new_adapter = None + for key in key_list: + _, target, _ = _get_submodules(self.model, key) + if isinstance(target, LycorisLayer): + target.delete_adapter(adapter_name) + if new_adapter is None: + new_adapter = target.active_adapters[:] + + self.active_adapter = new_adapter or [] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2955d7258ddcf76b47b38fd6fd5ebeb3d1d6110c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .model import COMPATIBLE_TUNER_TYPES, MixedModel + + +__all__ = ["COMPATIBLE_TUNER_TYPES", "MixedModel"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2998b39ad6593c69a87d47bfbd57dac33581c28d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aad840783f589669cf9521c9bbd3d4d10211d8d3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/model.py new file mode 100644 index 0000000000000000000000000000000000000000..d292ffec37820d9c88e374cf9891ec1485e41b8b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/model.py @@ -0,0 +1,339 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import warnings +from typing import Any, Optional, Union + +from torch import nn +from tqdm import tqdm + +from peft.tuners import adalora, loha, lokr, lora, oft +from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists +from peft.utils import ( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, + ModulesToSaveWrapper, + PeftType, + _get_submodules, + get_auto_gptq_quant_linear, +) + + +# Collection of constants used for all tuners +COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT) +PREFIXES = [lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix] +Configs = Union[lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig] +Layers = (lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer) + + +class MixedModel(BaseTuner): + """ + A class that allows to mix different types of adapters in a single model. + + Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument + `mixed=True`. + + Args: + model (:obj:`nn.Module`): + The model to be tuned. + config (:obj:`PeftConfig`): + The config of the model to be tuned. The adapter type must be compatible. + adapter_name (:obj:`str`): + The name of the first adapter. + """ + + def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None: + super().__init__(model, config, adapter_name) + + def _check_new_adapter_config(self, config: Configs) -> None: + """ + A helper method to check the config when a new adapter is being added. + + Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. + + """ + if not isinstance(config, Configs.__args__): + raise ValueError( + f"{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}." + ) + + biases = (getattr(config, "bias", None) for config in self.peft_config) + biases = [bias for bias in biases if bias not in (None, "none")] + if len(biases) > 1: + raise ValueError( + f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " + "set bias to 'none' for all adapters." 
+ ) + + @staticmethod + def _check_target_module_exists(config: Configs, key: str): + return check_target_module_exists(config, key) + + def _create_and_replace( + self, + config: Configs, + *args: Any, + **kwargs: Any, + ) -> None: + if isinstance(config, adalora.AdaLoraConfig): + adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs) + elif isinstance(config, lora.LoraConfig): + lora.LoraModel._create_and_replace(self, config, *args, **kwargs) + elif isinstance(config, loha.LoHaConfig): + loha.LoHaModel._create_and_replace(self, config, *args, **kwargs) + elif isinstance(config, lokr.LoKrConfig): + lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs) + elif isinstance(config, oft.OFTConfig): + oft.OFTModel._create_and_replace(self, config, *args, **kwargs) + else: + raise ValueError(f"Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") + + def _replace_module(self, parent, child_name, new_module, child) -> None: + setattr(parent, child_name, new_module) + # It's not necessary to set requires_grad here, as that is handled by + # _mark_only_adapters_as_trainable + + # child layer wraps the original module, unpack it + if hasattr(child, "base_layer"): + child = child.get_base_layer() + elif hasattr(child, "quant_linear_module"): + # TODO maybe not necessary to have special treatment? + child = child.quant_linear_module + + if not hasattr(new_module, "base_layer"): + new_module.weight = child.weight + if hasattr(child, "bias"): + new_module.bias = child.bias + + if getattr(child, "state", None) is not None: + if hasattr(new_module, "base_layer"): + new_module.base_layer.state = child.state + else: + new_module.state = child.state + new_module.to(child.weight.device) + + # dispatch to correct device + for name, module in new_module.named_modules(): + if any(prefix in name for prefix in PREFIXES): + module.to(child.weight.device) + if "ranknum" in name: + module.to(child.weight.device) + + def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: + for n, p in model.named_parameters(): + if not any(prefix in n for prefix in PREFIXES): + p.requires_grad = False + + for active_adapter in self.active_adapters: + bias = getattr(self.peft_config[active_adapter], "bias", "none") + if bias == "none": + continue + + if bias == "all": + for n, p in model.named_parameters(): + if "bias" in n: + p.requires_grad = True + elif bias == "lora_only": + # TODO: check if this is needed for other supported types + for m in model.modules(): + if isinstance(m, Layers) and hasattr(m, "bias") and m.bias is not None: + m.bias.requires_grad = True + else: + raise ValueError(f"Requested bias: {bias}, is not implemented.") + + @staticmethod + def _create_new_module(config, adapter_name, target, **kwargs): + gptq_quantization_config = kwargs.get("gptq_quantization_config", None) + AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) + if (gptq_quantization_config is not None) or (AutoGPTQQuantLinear is not None): + raise ValueError(f"GPTQ quantization not supported for {config.peft_type.value} (yet).") + + loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) + loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) + if loaded_in_8bit or loaded_in_4bit: + raise ValueError(f"8bit and 4bit quantization not supported for {config.peft_type.value} (yet).") + + if isinstance(config, adalora.AdaLoraConfig): + new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs) + elif isinstance(config, 
lora.LoraConfig): + new_module = lora.LoraModel._create_new_module(config, adapter_name, target, **kwargs) + elif isinstance(config, loha.LoHaConfig): + new_module = loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs) + elif isinstance(config, lokr.LoKrConfig): + new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs) + elif isinstance(config, oft.OFTConfig): + new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs) + else: + raise ValueError(f"Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") + return new_module + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + def _set_adapter_layers(self, enabled=True): + for module in self.model.modules(): + if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): + module.enable_adapters(enabled) + + def enable_adapter_layers(self): + self._set_adapter_layers(enabled=True) + + def disable_adapter_layers(self): + for active_adapter in self.active_adapters: + val = getattr(self.peft_config[active_adapter], "bias", "none") + if val != "none": + msg = ( + f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " + "output as the the base model would without adaption." + ) + warnings.warn(msg) + self._set_adapter_layers(enabled=False) + + def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: + for module in self.model.modules(): + if isinstance(module, Layers): + if module.merged: + warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") + module.unmerge() + module.set_adapter(adapter_name) + self.active_adapter = adapter_name + + @staticmethod + def _prepare_adapter_config(peft_config, model_config): + if peft_config.target_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: + raise ValueError("Please specify `target_modules` in `peft_config`") + + peft_config.target_modules = set( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] + ) + return peft_config + + def _unload_and_optionally_merge( + self, + merge=True, + progressbar: bool = False, + safe_merge: bool = False, + adapter_names: Optional[list[str]] = None, + ): + if merge: + if getattr(self.model, "quantization_method", None) == "gptq": + raise ValueError("Cannot merge layers when the model is gptq quantized") + + def merge_recursively(module): + # helper function to recursively merge the base_layer of the target + path = [] + layer = module + while hasattr(layer, "base_layer"): + path.append(layer) + layer = layer.base_layer + for layer_before, layer_after in zip(path[:-1], path[1:]): + layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names) + layer_before.base_layer = layer_after.base_layer + module.merge(safe_merge=safe_merge, adapter_names=adapter_names) + + key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] + desc = "Unloading " + ("and merging " if merge else "") + "model" + + for key in tqdm(key_list, disable=not progressbar, desc=desc): + try: + parent, target, target_name = _get_submodules(self.model, key) + except AttributeError: + continue + + if hasattr(target, "base_layer"): + if merge: + merge_recursively(target) + self._replace_module(parent, 
target_name, target.get_base_layer(), target) + elif isinstance(target, ModulesToSaveWrapper): + # save any additional trainable modules part of `modules_to_save` + new_module = target.modules_to_save[target.active_adapter] + if hasattr(new_module, "base_layer"): + # check if the module is itself a tuner layer + if merge: + new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) + new_module = new_module.get_base_layer() + setattr(parent, target_name, new_module) + + return self.model + + def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None: + raise NotImplementedError(f"Weighted adapters are not supported for {self.__class__.__name__} (yet).") + + def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: + """ + Deletes an existing adapter. + + Args: + adapter_name (Union[str, list[str]]): Name of the adapter(s) to delete. + """ + if isinstance(adapter_name, str): + adapter_names = [adapter_name] + else: + adapter_names = adapter_name + + mismatched = set(adapter_names) - set(self.peft_config.keys()) + if mismatched: + raise ValueError( + f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" + ) + + for adapter_name in adapter_names: + del self.peft_config[adapter_name] + + key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] + new_adapter = None + for key in key_list: + _, target, _ = _get_submodules(self.model, key) + if isinstance(target, BaseTunerLayer): + target.delete_adapter(adapter_name) + if new_adapter is None: + new_adapter = target.active_adapters[:] + + self.active_adapter = new_adapter or [] + + def merge_and_unload( + self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None + ) -> nn.Module: + r""" + This method merges the layers into the base model. This is needed if someone wants to use the base model as a + standalone model. + + Args: + progressbar (`bool`): + whether to show a progressbar indicating the unload and merge process + safe_merge (`bool`): + whether to activate the safe merging check to check if there is any potential Nan in the adapter + weights + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + return self._unload_and_optionally_merge( + progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names + ) + + def unload(self) -> nn.Module: + """ + Gets back the base model by removing all the lora modules without merging. This gives back the original base + model. + """ + return self._unload_and_optionally_merge(merge=False) + + def generate(self, *args: Any, **kwargs: Any): + return self.model.generate(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..214f7722486485bea4ede3b5c1a433aac447dd2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit +from .model import MultitaskPromptEmbedding + + +__all__ = ["MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "MultitaskPromptEmbedding"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d0a4401d63daa6d02a572c33463171abac54f66 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8d60a57c8ac6bfed87fa630ecbde1d32181746a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d4fdbb92cfdebc647a3146ad7b7acc3c23996b3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py new file mode 100644 index 0000000000000000000000000000000000000000..67a3c323a299063900d42a6e464672898b13be7c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py @@ -0,0 +1,61 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import enum +from dataclasses import dataclass, field +from typing import Optional, Union + +from peft.tuners.prompt_tuning import PromptTuningConfig +from peft.utils import PeftType + + +class MultitaskPromptTuningInit(str, enum.Enum): + # initialize prompt with text + TEXT = "TEXT" + # initialize prompt with random matrix + RANDOM = "RANDOM" + # average the prefix and column matrices obtained during source training + AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS" + # pick prefix and column matrices for a particular task obtained during source training + EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK" + # only use the prompt embeddings trained during source training + ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED" + + +@dataclass +class MultitaskPromptTuningConfig(PromptTuningConfig): + prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field( + default=MultitaskPromptTuningInit.RANDOM, + metadata={ + "help": ( + "How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, " + "EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED." + ), + }, + ) + prompt_tuning_init_state_dict_path: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The path of source state dict. This is required when training the downstream target prompt from " + "the pretrained source prompt" + ), + }, + ) + prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"}) + num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"}) + num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"}) + + def __post_init__(self): + self.peft_type = PeftType.MULTITASK_PROMPT_TUNING diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py new file mode 100644 index 0000000000000000000000000000000000000000..66498c9f00deddbf3259a4f1095a0c5d4202b0d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py @@ -0,0 +1,115 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch + +from peft.tuners.prompt_tuning import PromptEmbedding +from peft.utils import TaskType + +from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit + + +# This code is adapted for the paper: https://arxiv.org/abs/2303.02861 and +# constitutes the work done at MIT-IBM Watson Research Lab. 
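The initialization modes defined above decide whether the per-task prefix factors start from scratch or from a source-training checkpoint. As a rough, hypothetical sketch (the model name and task counts are placeholders, not values taken from this diff), wiring the config into a model through the regular PEFT entry point could look like:

```py
from transformers import AutoModelForSeq2SeqLM
from peft import MultitaskPromptTuningConfig, MultitaskPromptTuningInit, TaskType, get_peft_model

# "t5-base" and the task counts below are placeholders for illustration.
config = MultitaskPromptTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    tokenizer_name_or_path="t5-base",
    num_virtual_tokens=50,
    num_tasks=8,   # number of tasks sharing the prompt
    num_ranks=1,   # rank of the per-task low-rank factors
    prompt_tuning_init=MultitaskPromptTuningInit.RANDOM,
    # For AVERAGE_SOURCE_TASKS / EXACT_SOURCE_TASK / ONLY_SOURCE_SHARED, also set
    # prompt_tuning_init_state_dict_path to a checkpoint produced by source training.
)

base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```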
+ + +class MultitaskPromptEmbedding(PromptEmbedding): + def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings): + super().__init__(config, word_embeddings) + + self.num_tasks = config.num_tasks + self.num_ranks = config.num_ranks + self.num_virtual_tokens = config.num_virtual_tokens + + self.num_transformer_submodules = config.num_transformer_submodules + if self.num_transformer_submodules is None: + self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 + + self.token_dim = config.token_dim + + total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules + + self.prefix_task_cols = torch.nn.Parameter( + torch.normal( + mean=0, + std=0.02, + size=(self.num_tasks, total_virtual_tokens, self.num_ranks), + ) + ) + self.prefix_task_rows = torch.nn.Parameter( + torch.normal( + mean=0, + std=0.02, + size=(self.num_tasks, self.num_ranks, self.token_dim), + ) + ) + + if config.prompt_tuning_init in [ + MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, + MultitaskPromptTuningInit.EXACT_SOURCE_TASK, + MultitaskPromptTuningInit.ONLY_SOURCE_SHARED, + ]: + if config.prompt_tuning_init_state_dict_path is None: + raise ValueError( + f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} " + "init method" + ) + + # TODO: There should be an option for safetensors + state_dict: dict = torch.load( + config.prompt_tuning_init_state_dict_path, + map_location=word_embeddings.weight.device, + ) + + if config.prompt_tuning_init in [ + MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, + MultitaskPromptTuningInit.EXACT_SOURCE_TASK, + ]: + prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"] + prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"] + + if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS: + prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True) + prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True) + elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK: + prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0) + prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0) + + state_dict = { + "embedding.weight": state_dict["prompt_embeddings"], + "prefix_task_cols": prefix_task_cols_, + "prefix_task_rows": prefix_task_rows_, + } + + self.load_state_dict(state_dict, strict=True) + elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED: + state_dict = { + "embedding.weight": state_dict["prompt_embeddings"], + } + + self.load_state_dict(state_dict, strict=False) + + def forward(self, indices, task_ids): + if task_ids is None: + raise ValueError("task_ids cannot be None") + + prompt_embeddings = self.embedding(indices) + + task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids) + task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids) + task_prompts = torch.matmul(task_cols, task_rows) + + prompt_embeddings *= task_prompts + + return prompt_embeddings diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..52ac7131e24bd5cf39bf97ab6336ed1f1d46e152 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2023-present the HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import OFTConfig +from .layer import Conv2d, Linear, OFTLayer +from .model import OFTModel + + +__all__ = ["OFTConfig", "OFTModel", "Conv2d", "Linear", "OFTLayer"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73066bdeaee6e95df34ab84394067245a6e4139c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4955dea55d8412a53aa4d972f270325b7756116 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a3960974b33dd5c4e2e596bddac49ed8784826b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5f66b1ba5a8c208caf929a8e9269d09d54c0f81 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/config.py new file mode 100644 index 0000000000000000000000000000000000000000..ba3b9a4401abd6a17840bc6944baaa9f0085fb39 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/config.py @@ -0,0 +1,119 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dataclasses import dataclass, field +from typing import List, Optional, Union + +from peft.tuners.lycoris_utils import LycorisConfig +from peft.utils import PeftType + + +@dataclass +class OFTConfig(LycorisConfig): + """ + This is the configuration class to store the configuration of a [`OFTModel`]. + + Args: + r (`int`): OFT rank. + module_dropout (`float`): The dropout probability for disabling OFT modules during training. + target_modules (`Optional[Union[List[str], str]]`): + The names of the modules to apply the adapter to. If this is specified, only the modules with the specified + names will be replaced. When passing a string, a regex match will be performed. When passing a list of + strings, either an exact match will be performed or it is checked if the name of the module ends with any + of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding + the output layer. If this is not specified, modules will be chosen according to the model architecture. If + the architecture is not known, an error will be raised -- in this case, you should specify the target + modules manually. + init_weights (`bool`): + Whether to perform initialization of OFT weights. + layers_to_transform (`Union[List[int], int]`): + The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices + that are specified in this list. If a single integer is passed, it will apply the transformations on the + layer at this index. + layers_pattern (`str`): + The layer pattern name, used only if `layers_to_transform` is different from `None`. + rank_pattern (`dict`): + The mapping from layer names or regexp expression to ranks which are different from the default rank + specified by `r`. + modules_to_save (`List[str]`): + List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. + coft (`bool`): + Whether to use the constrained variant of OFT or not, off by default. + eps (`float`): + The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True. + block_share (`bool`): + Whether to share the OFT parameters between blocks or not. This is `False` by default. + """ + + r: int = field(default=8, metadata={"help": "OFT rank"}) + module_dropout: float = field( + default=0.0, metadata={"help": "The dropout probability for disabling OFT modules during training"} + ) + target_modules: Optional[Union[List[str], str]] = field( + default=None, + metadata={ + "help": "List of module names or regex expression of the module names to replace with OFT. " + "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " + "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." + }, + ) + init_weights: bool = field( + default=True, + metadata={ + "help": ( + "Whether to initialize the weights of the OFT layers with their default initialization. Don't change " + "this setting, except if you know exactly what you're doing." + ), + }, + ) + layers_to_transform: Optional[Union[List[int], int]] = field( + default=None, + metadata={ + "help": "The layer indexes to transform. If this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
+ }, + ) + layers_pattern: Optional[str] = field( + default=None, + metadata={ + "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." + }, + ) + modules_to_save: Optional[List[str]] = field( + default=None, + metadata={ + "help": "List of modules apart from OFT layers to be set as trainable and saved in the final checkpoint. " + "For example, in Sequence Classification or Token Classification tasks, " + "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." + }, + ) + coft: bool = field( + default=False, + metadata={"help": "Whether to use the constrained variant of OFT or not."}, + ) + eps: float = field( + default=6e-5, + metadata={ + "help": "The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True." + }, + ) + block_share: bool = field( + default=False, + metadata={"help": "Whether to share the OFT parameters between blocks or not."}, + ) + + def __post_init__(self): + self.peft_type = PeftType.OFT + self.target_modules = ( + set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules + ) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..f4427304b5a739116f0dbca5582603a932518980 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/layer.py @@ -0,0 +1,388 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
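As a hedged usage sketch for the `OFTConfig` defined above (the model name and target module names are placeholder assumptions, not values from this diff), attaching OFT adapters through the regular PEFT entry point might look like:

```py
from transformers import AutoModelForCausalLM
from peft import OFTConfig, get_peft_model

# "facebook/opt-125m" and the target module names are placeholders; pick names that
# exist in your architecture (OFT layers support nn.Linear and nn.Conv2d base layers).
config = OFTConfig(
    r=8,
    target_modules=["q_proj", "v_proj"],
    module_dropout=0.0,
    init_weights=True,  # identity-initialized rotations leave the model output unchanged at step 0
)

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```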
+ +import math +import warnings +from typing import Any, List, Optional, Set, Tuple + +import torch +import torch.nn as nn + +from peft.tuners.lycoris_utils import LycorisLayer, check_adapters_to_merge + + +class OFTLayer(nn.Module, LycorisLayer): + # All names of layers that may contain adapter weights + adapter_layer_names = ("oft_r",) + # other_param_names is defined on parent class + + def __init__(self, base_layer: nn.Module): + super().__init__() + LycorisLayer.__init__(self, base_layer) + + # OFT info + self.oft_r = nn.ParameterDict({}) + self.coft = {} + self.eps = {} + self.block_share = {} + + @property + def _available_adapters(self) -> Set[str]: + return {*self.oft_r} + + def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...], block_share: bool): + if block_share: + self.oft_r[adapter_name] = nn.Parameter(torch.empty(1, math.ceil(shape[0] / r), math.ceil(shape[0] / r))) + else: + self.oft_r[adapter_name] = nn.Parameter(torch.empty(r, math.ceil(shape[0] / r), math.ceil(shape[0] / r))) + + def reset_adapter_parameters(self, adapter_name: str): + nn.init.zeros_(self.oft_r[adapter_name]) + + def reset_adapter_parameters_random(self, adapter_name: str): + nn.init.kaiming_uniform_(self.oft_r[adapter_name], a=math.sqrt(5)) + + def update_layer( + self, + adapter_name: str, + r: int, + module_dropout: float, + init_weights: bool, + coft: bool = False, + eps: float = 6e-5, + block_share: bool = False, + **kwargs, + ) -> None: + """Internal function to create oft adapter + + Args: + adapter_name (`str`): Name for the adapter to add. + r (`int`): Rank for the added adapter. + module_dropout (`float`): The dropout probability for disabling adapter during training. + init_weights (`bool`): Whether to initialize weights. + coft (`bool`): Whether to use the constrained variant of OFT or not. + eps (`float`): + The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True. + block_share (`bool`): Whether to share the OFT parameters between blocks or not. 
+ """ + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") + + self.r[adapter_name] = r + self.module_dropout[adapter_name] = module_dropout + self.coft[adapter_name] = coft + self.block_share[adapter_name] = block_share + + # Determine shape of OFT weights + base_layer = self.get_base_layer() + if isinstance(base_layer, nn.Linear): + shape = tuple(base_layer.weight.shape) + elif isinstance(base_layer, nn.Conv2d): + shape = ( + base_layer.out_channels, + base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], + ) + else: + raise TypeError(f"OFT is not implemented for base layers of type {type(base_layer).__name__}") + + self.eps[adapter_name] = eps * math.ceil(shape[0] / r) * math.ceil(shape[0] / r) + + # Create weights with provided shape + self.create_adapter_parameters(adapter_name, r, shape, block_share) + + # Initialize weights + if init_weights: + self.reset_adapter_parameters(adapter_name) + else: + self.reset_adapter_parameters_random(adapter_name) + + # Move new weights to device + weight = getattr(self.get_base_layer(), "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + if weight.dtype.is_floating_point or weight.dtype.is_complex: + self.to(weight.device, dtype=weight.dtype) + else: + self.to(weight.device) + self.set_adapter(self.active_adapters) + + def unscale_layer(self, scale=None) -> None: + # scale is not used + pass + + def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If `None`, all active adapters will be merged. + Defaults to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self._available_adapters: + base_layer = self.get_base_layer() + + orig_weights = base_layer.weight.data + if isinstance(base_layer, nn.Linear): + orig_weights = torch.transpose(orig_weights, 0, 1) + elif isinstance(base_layer, nn.Conv2d): + orig_weights = orig_weights.view( + [ + base_layer.out_channels, + base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], + ] + ) + orig_weights = torch.transpose(orig_weights, 0, 1) + delta_weight = self.get_delta_weight(active_adapter) + if orig_weights.shape[1] != delta_weight.shape[1]: + # when in channels is not divisible by r + delta_weight = delta_weight[: orig_weights.shape[1], : orig_weights.shape[1]] + new_weights = torch.mm(orig_weights, delta_weight) + if isinstance(base_layer, nn.Linear): + new_weights = torch.transpose(new_weights, 0, 1) + elif isinstance(base_layer, nn.Conv2d): + new_weights = torch.transpose(new_weights, 0, 1) + new_weights = new_weights.view( + [ + base_layer.out_channels, + base_layer.in_channels, + base_layer.kernel_size[0], + base_layer.kernel_size[1], + ] + ) + + if safe_merge and not torch.isfinite(new_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = new_weights + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self._available_adapters: + base_layer = self.get_base_layer() + new_weights = base_layer.weight.data + if isinstance(base_layer, nn.Linear): + new_weights = torch.transpose(new_weights, 0, 1) + elif isinstance(base_layer, nn.Conv2d): + new_weights = new_weights.view( + [ + base_layer.out_channels, + base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], + ] + ) + new_weights = torch.transpose(new_weights, 0, 1) + delta_weight = self.get_delta_weight(active_adapter) + if new_weights.shape[1] != delta_weight.shape[1]: + # when in channels is not divisible by r + delta_weight = delta_weight[: new_weights.shape[1], : new_weights.shape[1]] + delta_inv = torch.inverse(delta_weight) + orig_weights = torch.mm(new_weights, delta_inv) + + if isinstance(base_layer, nn.Linear): + orig_weights = torch.transpose(orig_weights, 0, 1) + elif isinstance(base_layer, nn.Conv2d): + orig_weights = torch.transpose(orig_weights, 0, 1) + orig_weights = orig_weights.reshape( + [ + base_layer.out_channels, + base_layer.in_channels, + base_layer.kernel_size[0], + base_layer.kernel_size[1], + ] + ) + base_layer.weight.data = orig_weights + + def get_delta_weight(self, adapter_name: str) -> torch.Tensor: + rank = self.r[adapter_name] + coft = self.coft[adapter_name] + eps = self.eps[adapter_name] + opt_r = self.oft_r[adapter_name] + + if coft: + with torch.no_grad(): + opt_r.copy_(self._project_batch(opt_r, eps=eps)) + + orth_rotate = self._cayley_batch(opt_r) + weight = self._block_diagonal(orth_rotate, rank) + + return weight + + # Copied from https://github.com/Zeju1997/oft/blob/84cebb965df69781e3d9c3c875f5980b421eaf24/oft-control/oft.py#L144 + def _cayley_batch(self, data: torch.Tensor) -> torch.Tensor: + b, r, c = data.shape + # Ensure the input matrix is skew-symmetric + skew = 0.5 * (data - data.transpose(1, 2)) + I = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, c) # noqa: E741 + + # Perform the Cayley parametrization + Q = torch.bmm(I - skew, torch.inverse(I + skew)) + + return Q + + # Copied from https://github.com/Zeju1997/oft/blob/84cebb965df69781e3d9c3c875f5980b421eaf24/oft-control/oft.py#L155 + def _block_diagonal(self, oft_r: torch.Tensor, rank: int) -> torch.Tensor: + if oft_r.shape[0] == 1: + # block share + blocks = [oft_r[0, ...] for i in range(rank)] + else: + blocks = [oft_r[i, ...] 
for i in range(rank)] + + # Use torch.block_diag to create the block diagonal matrix + A = torch.block_diag(*blocks) + + return A + + # Copied from https://github.com/Zeju1997/oft/blob/84cebb965df69781e3d9c3c875f5980b421eaf24/oft-control/oft.py#L52 + def _project_batch(self, oft_r, eps=1e-5): + # scaling factor for each of the smaller block matrix + eps = eps * 1 / torch.sqrt(torch.tensor(oft_r.shape[0])) + I = ( # noqa: E741 + torch.zeros((oft_r.size(1), oft_r.size(1)), device=oft_r.device, dtype=oft_r.dtype) + .unsqueeze(0) + .expand_as(oft_r) + ) + diff = oft_r - I + norm_diff = torch.norm(oft_r - I, dim=(1, 2), keepdim=True) + mask = (norm_diff <= eps).bool() + out = torch.where(mask, oft_r, I + eps * (diff / norm_diff)) + return out + + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + previous_dtype = x.dtype + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + if len(result.shape) == 4: + result = result.permute(0, 2, 3, 1) + + base_layer = self.get_base_layer() + base_bias = base_layer.bias + if base_bias is not None: + # Bias should be added after OFT forward + result = result - base_bias.data + + # Execute all the adapters + for active_adapter in self.active_adapters: + if active_adapter not in self._available_adapters: + continue + + module_dropout = self.module_dropout[active_adapter] + + # Modify current execution weights + if (not self.training) or (self.training and torch.rand(1) > module_dropout): + result = self._get_delta_activations(active_adapter, result, *args, **kwargs) + + if base_bias is not None: + result = result + base_bias.data + if len(result.shape) == 4: + result = result.permute(0, 3, 1, 2) + + result = result.to(previous_dtype) + return result + + +class Linear(OFTLayer): + """OFT implemented in Linear layer""" + + def __init__( + self, + base_layer: nn.Module, + adapter_name: str = "default", + r: int = 0, + module_dropout: float = 0.0, + init_weights: bool = True, + **kwargs, + ): + super().__init__(base_layer) + + # Create adapter and set it active + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, module_dropout, init_weights, **kwargs) + + def _get_delta_activations( + self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any + ) -> torch.Tensor: + delta_weight = self.get_delta_weight(adapter_name) + + base_layer = self.get_base_layer() + base_weight = base_layer.weight.data + delta_weight = delta_weight[: base_weight.shape[0], : base_weight.shape[0]] + + # don't add bias here, because the bias will be added after OFT forward + return torch.matmul(input, delta_weight) + + def __repr__(self) -> str: + rep = super().__repr__() + return "oft." 
+ rep + + +class Conv2d(OFTLayer): + """OFT implemented in Conv2d layer""" + + def __init__( + self, + base_layer: nn.Module, + adapter_name: str = "default", + r: int = 0, + module_dropout: float = 0.0, + init_weights: bool = True, + **kwargs, + ): + super().__init__(base_layer) + + # Create adapter and set it active + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, module_dropout, init_weights, **kwargs) + + def _get_delta_activations( + self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any + ) -> torch.Tensor: + delta_weight = self.get_delta_weight(adapter_name) + + base_layer = self.get_base_layer() + base_weight = base_layer.weight.data + delta_weight = delta_weight[: base_weight.shape[0], : base_weight.shape[0]] + + # don't add bias here, because the bias will be added after OFT forward + return torch.matmul(input, delta_weight) + + def __repr__(self) -> str: + rep = super().__repr__() + return "oft." + rep diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/model.py new file mode 100644 index 0000000000000000000000000000000000000000..fd96325c6f0a6d7fd87b77e033d5bf49a9050752 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/oft/model.py @@ -0,0 +1,106 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from typing import Dict, Type, Union + +import torch +from torch import nn + +from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner + +from .layer import Conv2d, Linear, OFTLayer + + +class OFTModel(LycorisTuner): + """ + Creates Orthogonal Finetuning model from a pretrained model. The method is described in + https://arxiv.org/abs/2306.07280 + + Args: + model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. + config ([`OFTConfig`]): The configuration of the OFT model. + adapter_name (`str`): The name of the adapter, defaults to `"default"`. + + Returns: + `torch.nn.Module`: The OFT model. + + Example: + ```py + >>> from diffusers import StableDiffusionPipeline + >>> from peft import OFTModel, OFTConfig + + >>> config_te = OFTConfig( + ... r=8, + ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], + ... module_dropout=0.0, + ... init_weights=True, + ... ) + >>> config_unet = OFTConfig( + ... r=8, + ... target_modules=[ + ... "proj_in", + ... "proj_out", + ... "to_k", + ... "to_q", + ... "to_v", + ... "to_out.0", + ... "ff.net.0.proj", + ... "ff.net.2", + ... ], + ... module_dropout=0.0, + ... init_weights=True, + ... ) + + >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> model.text_encoder = OFTModel(model.text_encoder, config_te, "default") + >>> model.unet = OFTModel(model.unet, config_unet, "default") + ``` + + **Attributes**: + - **model** ([`~torch.nn.Module`]) -- The model to be adapted. 
+ - **peft_config** ([`OFTConfig`]): The configuration of the OFT model. + """ + + prefix: str = "oft_" + layers_mapping: Dict[Type[torch.nn.Module], Type[OFTLayer]] = { + torch.nn.Conv2d: Conv2d, + torch.nn.Linear: Linear, + } + + def _create_and_replace( + self, + config: LycorisConfig, + adapter_name: str, + target: Union[OFTLayer, nn.Module], + target_name: str, + parent: nn.Module, + current_key: str, + ) -> None: + """ + A private method to create and replace the target module with the adapter module. + """ + + # Regexp matching - Find key which matches current target_name in patterns provided + pattern_keys = list(config.rank_pattern.keys()) + target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name) + + kwargs = config.to_dict() + kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) + + if isinstance(target, OFTLayer): + target.update_layer(adapter_name, **kwargs) + else: + new_module = self._create_new_module(config, adapter_name, target, **kwargs) + self._replace_module(parent, target_name, new_module, target) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd4a6972dde9a40ddaaf705008c71c591d3f534f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..172603fa18b1cde06fff229d2c4b9372232c6271 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0d9bf9ba1f0eab6179d82df0142ad33da6c0fbf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42fc7423ffed82b735dd22d9193f29d5f8473f62 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/router.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/router.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea26b9e9fe9c723c711781fd0af4e40c055df27c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/router.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/model.py new file mode 100644 index 
0000000000000000000000000000000000000000..943a287955ad060289b1a4aa0f9c4f1e493ccbce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/model.py @@ -0,0 +1,187 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from contextlib import contextmanager +from dataclasses import asdict +from enum import Enum +from typing import Any + +import torch +from torch import nn + +from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists +from peft.utils import ( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, + ModulesToSaveWrapper, +) + +from .config import PolyConfig +from .layer import Linear, PolyLayer + + +class PolyModel(BaseTuner): + prefix: str = "poly_" + + def __init__(self, model, config, adapter_name) -> None: + super().__init__(model, config, adapter_name) + + @staticmethod + def _check_target_module_exists(poly_config, key): + return check_target_module_exists(poly_config, key) + + def _create_and_replace( + self, + poly_config: PolyConfig, + adapter_name: str, + target: nn.Module, + target_name: str, + parent: nn.Module, + **optional_kwargs: Any, + ): + if isinstance(target, PolyLayer): + target.update_layer(adapter_name, poly_config) + else: + new_module = self._create_new_module( + poly_config, + adapter_name, + target, + ) + if adapter_name != self.active_adapter: + # adding an additional adapter: it is not automatically trainable + new_module.requires_grad_(False) + self._replace_module(parent, target_name, new_module, target) + + def _replace_module(self, parent, child_name, new_module, child): + setattr(parent, child_name, new_module) + # It's not necessary to set requires_grad here, as that is handled by + # _mark_only_adapters_as_trainable + + # child layer wraps the original module, unpack it + if hasattr(child, "base_layer"): + child = child.base_layer + + if not hasattr(new_module, "base_layer"): + new_module.weight = child.weight + if hasattr(child, "bias"): + new_module.bias = child.bias + + if getattr(child, "state", None) is not None: + if hasattr(new_module, "base_layer"): + new_module.base_layer.state = child.state + else: + new_module.state = child.state + new_module.to(child.weight.device) + + # dispatch to correct device + for name, module in new_module.named_modules(): + if (self.prefix in name) or ("ranknum" in name): + weight = child.qweight if hasattr(child, "qweight") else child.weight + module.to(weight.device) + + def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: + for n, p in model.named_parameters(): + if self.prefix not in n: + p.requires_grad = False + + @staticmethod + def _create_new_module(poly_config, adapter_name, target, **kwargs): + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if isinstance(target_base_layer, torch.nn.Linear): + return Linear(target, adapter_name, poly_config, **kwargs) + else: + raise ValueError( + f"Target module 
{target} is not supported. Currently, only the following modules are supported: " + "`torch.nn.Linear`." + ) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + def get_peft_config_as_dict(self, inference: bool = False): + config_dict = {} + for key, value in self.peft_config.items(): + config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} + if inference: + config["inference_mode"] = True + config_dict[key] = config + return config_dict + + def _set_adapter_layers(self, enabled=True): + for module in self.model.modules(): + if isinstance(module, (PolyLayer, ModulesToSaveWrapper)): + module.enable_adapters(enabled) + + def enable_adapter_layers(self): + self._set_adapter_layers(enabled=True) + + def disable_adapter_layers(self): + self._set_adapter_layers(enabled=False) + + def set_adapter(self, adapter_name): + for module in self.model.modules(): + if isinstance(module, PolyLayer): + module.set_adapter(adapter_name) + + def _prepare_adapter_config(self, peft_config, model_config): + if peft_config.target_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: + raise ValueError("Please specify `target_modules` in `peft_config`") + peft_config.target_modules = set( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] + ) + return peft_config + + def _register_pre_hooks(self, task_ids): + """Helper method to register pre hooks.""" + if task_ids is None: + return [] + + def pre_hook(_, args, kwargs): + kwargs["task_ids"] = task_ids + return args, kwargs + + handles = [] + + for module in self.model.modules(): + if isinstance(module, Linear): + handle = module.register_forward_pre_hook(pre_hook, with_kwargs=True) + handles.append(handle) + + return handles + + @contextmanager + def _manage_pre_hooks(self, task_ids): + """Context manager to handle the lifecycle of pre hooks.""" + handles = self._register_pre_hooks(task_ids) + try: + yield + finally: + for handle in handles: + handle.remove() + + def forward(self, *args, task_ids=None, **kwargs): + with self._manage_pre_hooks(task_ids): + return self.model(*args, **kwargs) + + def generate(self, *args, task_ids=None, **kwargs): + with self._manage_pre_hooks(task_ids): + return self.model.generate(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..28f4bedbb43bcf2b22146d60e0e1f2fe7b19d9eb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
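The `_register_pre_hooks`/`_manage_pre_hooks` machinery in the `PolyModel` above is what lets a `task_ids` tensor flow from a top-level `forward`/`generate` call down to every Poly `Linear` layer. A minimal, hypothetical end-to-end sketch (the model name and Poly hyperparameters are placeholder assumptions) could look like:

```py
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PolyConfig, TaskType, get_peft_model

# "t5-small" and the hyperparameters below are placeholders for illustration.
config = PolyConfig(task_type=TaskType.SEQ_2_SEQ_LM, n_tasks=4, n_skills=2, n_splits=1, r=4)
base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = get_peft_model(base_model, config)

tokenizer = AutoTokenizer.from_pretrained("t5-small")
inputs = tokenizer(["translate English to German: hello"], return_tensors="pt")
task_ids = torch.tensor([1])  # one routing id per example; consumed by the pre-hooks above

outputs = model.generate(**inputs, task_ids=task_ids, max_new_tokens=5)
```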
+ +from .config import PrefixTuningConfig +from .model import PrefixEncoder + + +__all__ = ["PrefixTuningConfig", "PrefixEncoder"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6dcee0c35caa17676df43a0bb6780d4ab84fd58 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0677db70c00f7e83037dd0cad9a94a7ef478b55 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d6239ffa24ab7e8cf42135152b909863edc4b81 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/config.py new file mode 100644 index 0000000000000000000000000000000000000000..39203ff7beb571f067331798051e085a49273211 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/config.py @@ -0,0 +1,41 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field + +from peft.config import PromptLearningConfig +from peft.utils import PeftType + + +@dataclass +class PrefixTuningConfig(PromptLearningConfig): + """ + This is the configuration class to store the configuration of a [`PrefixEncoder`]. + + Args: + encoder_hidden_size (`int`): The hidden size of the prompt encoder. + prefix_projection (`bool`): Whether to project the prefix embeddings. 
+ """ + + encoder_hidden_size: int = field( + default=None, + metadata={"help": "The hidden size of the encoder"}, + ) + prefix_projection: bool = field( + default=False, + metadata={"help": "Whether to project the prefix tokens"}, + ) + + def __post_init__(self): + self.peft_type = PeftType.PREFIX_TUNING diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/model.py new file mode 100644 index 0000000000000000000000000000000000000000..ffd51892a3cc074406791f6bc7d1b088d25148e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prefix_tuning/model.py @@ -0,0 +1,80 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py +# with some refactor +import torch + + +class PrefixEncoder(torch.nn.Module): + r""" + The `torch.nn` model to encode the prefix. + + Args: + config ([`PrefixTuningConfig`]): The configuration of the prefix encoder. + + Example: + + ```py + >>> from peft import PrefixEncoder, PrefixTuningConfig + + >>> config = PrefixTuningConfig( + ... peft_type="PREFIX_TUNING", + ... task_type="SEQ_2_SEQ_LM", + ... num_virtual_tokens=20, + ... token_dim=768, + ... num_transformer_submodules=1, + ... num_attention_heads=12, + ... num_layers=12, + ... encoder_hidden_size=768, + ... ) + >>> prefix_encoder = PrefixEncoder(config) + ``` + + **Attributes**: + - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder. + - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if + `prefix_projection` is `True`. + - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings. 
+ + Input shape: (`batch_size`, `num_virtual_tokens`) + + Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`) + """ + + def __init__(self, config): + super().__init__() + self.prefix_projection = config.prefix_projection + token_dim = config.token_dim + num_layers = config.num_layers + encoder_hidden_size = config.encoder_hidden_size + num_virtual_tokens = config.num_virtual_tokens + if self.prefix_projection and not config.inference_mode: + # Use a two-layer MLP to encode the prefix + self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim) + self.transform = torch.nn.Sequential( + torch.nn.Linear(token_dim, encoder_hidden_size), + torch.nn.Tanh(), + torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim), + ) + else: + self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim) + + def forward(self, prefix: torch.Tensor): + if self.prefix_projection: + prefix_tokens = self.embedding(prefix) + past_key_values = self.transform(prefix_tokens) + else: + past_key_values = self.embedding(prefix) + return past_key_values diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..71795b61d819573ff41770e6d49c750e6c51b0ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import PromptTuningConfig, PromptTuningInit +from .model import PromptEmbedding + + +__all__ = ["PromptTuningConfig", "PromptEmbedding", "PromptTuningInit"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/config.py new file mode 100644 index 0000000000000000000000000000000000000000..d9987e112abc389f623a6ae4f7df90bd70c8439a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/config.py @@ -0,0 +1,86 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
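To make the input/output contract of `PrefixEncoder` above concrete, here is a small, self-contained shape check; the dimensions are illustrative assumptions, not taken from this diff:

```py
import torch
from peft import PrefixEncoder, PrefixTuningConfig

# Illustrative dimensions only; they are not tied to any particular base model.
config = PrefixTuningConfig(
    num_virtual_tokens=20,
    token_dim=768,
    num_layers=12,
    num_attention_heads=12,
    num_transformer_submodules=1,
    encoder_hidden_size=768,
    prefix_projection=False,  # plain embedding table, no two-layer MLP reparameterization
)
encoder = PrefixEncoder(config)

batch_size = 2
prefix = torch.arange(config.num_virtual_tokens).unsqueeze(0).expand(batch_size, -1)
past_key_values = encoder(prefix)
print(past_key_values.shape)  # torch.Size([2, 20, 18432]) == (batch, tokens, 2 * num_layers * token_dim)
```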
+ +import enum +from dataclasses import dataclass, field +from typing import Optional, Union + +from peft.config import PromptLearningConfig +from peft.utils import PeftType + + +class PromptTuningInit(str, enum.Enum): + TEXT = "TEXT" + RANDOM = "RANDOM" + + +@dataclass +class PromptTuningConfig(PromptLearningConfig): + """ + This is the configuration class to store the configuration of a [`PromptEmbedding`]. + + Args: + prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding. + prompt_tuning_init_text (`str`, *optional*): + The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`. + tokenizer_name_or_path (`str`, *optional*): + The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`. + tokenizer_kwargs (`dict`, *optional*): + The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if `prompt_tuning_init` is + `TEXT`. + """ + + prompt_tuning_init: Union[PromptTuningInit, str] = field( + default=PromptTuningInit.RANDOM, + metadata={"help": "How to initialize the prompt tuning parameters"}, + ) + prompt_tuning_init_text: Optional[str] = field( + default=None, + metadata={ + "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" + }, + ) + tokenizer_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" + }, + ) + + tokenizer_kwargs: Optional[dict] = field( + default=None, + metadata={ + "help": ( + "The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if prompt_tuning_init is " + "`TEXT`" + ), + }, + ) + + def __post_init__(self): + self.peft_type = PeftType.PROMPT_TUNING + if (self.prompt_tuning_init == PromptTuningInit.TEXT) and not self.tokenizer_name_or_path: + raise ValueError( + f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', " + f"tokenizer_name_or_path can't be {self.tokenizer_name_or_path}." + ) + if (self.prompt_tuning_init == PromptTuningInit.TEXT) and self.prompt_tuning_init_text is None: + raise ValueError( + f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', " + f"prompt_tuning_init_text can't be {self.prompt_tuning_init_text}." + ) + if self.tokenizer_kwargs and (self.prompt_tuning_init != PromptTuningInit.TEXT): + raise ValueError( + f"tokenizer_kwargs only valid when using prompt_tuning_init='{PromptTuningInit.TEXT.value}'." + ) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/model.py new file mode 100644 index 0000000000000000000000000000000000000000..a04221c2abfd1fb806df2805a7a28e4e3073a32d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/model.py @@ -0,0 +1,89 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
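As a quick, hedged illustration of the `__post_init__` validation rules above (the tokenizer name and init text are placeholder values), a TEXT-initialized prompt tuning config could be built like this:

```py
from peft import PromptTuningConfig, PromptTuningInit, TaskType

# "gpt2" and the init text are placeholders; TEXT init needs both a tokenizer and an init text.
config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    num_virtual_tokens=16,
    prompt_tuning_init=PromptTuningInit.TEXT,
    prompt_tuning_init_text="Classify if the sentiment of this review is positive or negative:",
    tokenizer_name_or_path="gpt2",
)
# Omitting tokenizer_name_or_path or prompt_tuning_init_text while using TEXT init
# raises a ValueError in __post_init__, as implemented above.
```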
+ +import math + +import torch + +from .config import PromptTuningInit + + +class PromptEmbedding(torch.nn.Module): + """ + The model to encode virtual tokens into prompt embeddings. + + Args: + config ([`PromptTuningConfig`]): The configuration of the prompt embedding. + word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model. + + **Attributes**: + - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding. + + Example: + + ```py + >>> from peft import PromptEmbedding, PromptTuningConfig + + >>> config = PromptTuningConfig( + ... peft_type="PROMPT_TUNING", + ... task_type="SEQ_2_SEQ_LM", + ... num_virtual_tokens=20, + ... token_dim=768, + ... num_transformer_submodules=1, + ... num_attention_heads=12, + ... num_layers=12, + ... prompt_tuning_init="TEXT", + ... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral", + ... tokenizer_name_or_path="t5-base", + ... ) + + >>> # t5_model.shared is the word embeddings of the base model + >>> prompt_embedding = PromptEmbedding(config, t5_model.shared) + ``` + + Input Shape: (`batch_size`, `total_virtual_tokens`) + + Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) + """ + + def __init__(self, config, word_embeddings): + super().__init__() + + total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules + self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim) + if config.prompt_tuning_init == PromptTuningInit.TEXT and not config.inference_mode: + from transformers import AutoTokenizer + + tokenizer_kwargs = config.tokenizer_kwargs or {} + tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs) + init_text = config.prompt_tuning_init_text + init_token_ids = tokenizer(init_text)["input_ids"] + # Trim or iterate until num_text_tokens matches total_virtual_tokens + num_text_tokens = len(init_token_ids) + if num_text_tokens > total_virtual_tokens: + init_token_ids = init_token_ids[:total_virtual_tokens] + elif num_text_tokens < total_virtual_tokens: + num_reps = math.ceil(total_virtual_tokens / num_text_tokens) + init_token_ids = init_token_ids * num_reps + init_token_ids = init_token_ids[:total_virtual_tokens] + init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device) + + word_embedding_weights = word_embeddings(init_token_ids).detach().clone() + word_embedding_weights = word_embedding_weights.to(torch.float32) + self.embedding.weight = torch.nn.Parameter(word_embedding_weights) + + def forward(self, indices): + # Just get embeddings + prompt_embeddings = self.embedding(indices) + return prompt_embeddings diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/tuners_utils.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/tuners_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..69b26a2bb1be9a2f7ee01e24503ee59ea7d90d29 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/tuners_utils.py @@ -0,0 +1,767 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import copy +import logging +import re +import warnings +from abc import ABC, abstractmethod +from contextlib import contextmanager +from typing import Any, Optional, Union + +import torch +from accelerate.hooks import AlignDevicesHook +from accelerate.utils import named_module_tensors, offload_state_dict +from torch import nn +from transformers import PreTrainedModel +from transformers.pytorch_utils import Conv1D + +from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND + +from ..config import PeftConfig +from ..utils import ModulesToSaveWrapper, _get_submodules + + +logger = logging.getLogger(__name__) + + +@contextmanager +def onload_layer(layer): + r""" + A utility for modifying a module containing one or more tuners and a base layer, any of which are offloaded to the + CPU or disk. Moves a module's sub-modules to the execution device before some action is performed, after that the + base layer state dictionary is re-assigned (if that layer was offloaded to the disk) and finally the parameters are + offloaded. + + If the module has no offloaded sub-modules, this function does nothing. + + Args: + layer ('torch.nn.Module'): + layer with tuners to be merged + """ + + offloaded_modules = [] + for name, module in layer.named_modules(): + if name in ["", "base_layer"]: + continue + if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload: + module._hf_hook.pre_forward(module) + offloaded_modules.append(module) + + base_layer_offload = False + if hasattr(layer, "base_layer") and ( + hasattr(layer.base_layer, "_hf_hook") + and isinstance(layer.base_layer._hf_hook, AlignDevicesHook) + and layer.base_layer._hf_hook.offload + ): + if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values(): + # retrieve the name of the original disk-offload directory + offload_folder = layer.base_layer._hf_hook.weights_map.dataset.save_folder + layer.base_layer._hf_hook.pre_forward(layer.base_layer) + base_layer_offload = True + + yield + + for module in offloaded_modules: + module._hf_hook.post_forward(module, torch.tensor([])) + + if base_layer_offload: + # re-make weights map (must be on cpu to send params to the disk via memmap if disk offload) + layer.base_layer._hf_hook.weights_map = { + name: param.to("cpu") for name, param in named_module_tensors(layer.base_layer) + } + # offload weights map to disk if original device is the disk + if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values(): + # rewrite directory with merged weights + offload_state_dict(offload_folder, layer.base_layer._hf_hook.weights_map) + layer.base_layer._hf_hook.post_forward(layer.base_layer, torch.tensor([])) + + +class BaseTuner(nn.Module, ABC): + r""" + A base tuner model that provides the common methods and attributes for all tuners that are injectable into a + torch.nn.Module + + For adding a new Tuner class, one needs to overwrite the following methods: + + - **_prepare_adapter_config**: + A private method to eventually prepare the adapter config, for example in case the field 
`target_modules` is + missing. + - **_create_and_replace**: + A private method to create and replace the target module with the adapter module. + - **_check_target_module_exists**: + A private helper method to check if the passed module's key name matches any of the target modules in the + adapter_config. + + The easiest way is to check what is done in the `peft.tuners.lora.LoraModel` class. + + Attributes: + model (`torch.nn.Module`): + The model to which the adapter tuner layers will be attached. + forward (`Callable`): + The forward method of the model. + peft_config (`Union[`PeftConfig`, dict[str, PeftConfig]]`): + The adapter configuration object; it should be a dictionary of `str` to `PeftConfig` objects. One can also + pass a PeftConfig object, in which case a new adapter is created with the default name `adapter`, or create a new + dictionary with a key `adapter_name` and a value of that peft config. + config (`dict[str, Any]`): + The model configuration object; it should be a dictionary of `str` to `Any` objects. + targeted_module_names (`list[str]`): + The list of module names that were actually adapted. Can be useful to inspect if you want to quickly + double-check that the `config.target_modules` were specified correctly. + """ + + def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None: + super().__init__() + + self.model = model + self.targeted_module_names: list[str] = [] + + # For advanced developers, if you want to attach multiple adapters to your + # model, just add a `peft_config` dict attribute to your model. + if not hasattr(self, "peft_config"): + self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config + else: + logger.info( + "Already found a `peft_config` attribute in the model. This will lead to having multiple adapters" + " in the model. Make sure to know what you are doing!" + ) + if isinstance(peft_config, PeftConfig): + self.peft_config[adapter_name] = peft_config + else: + # user is adding a dict of PeftConfigs + self.peft_config.update(peft_config) + + self.active_adapter = adapter_name + self.inject_adapter(self.model, adapter_name) + + # Copy the peft_config in the injected model. + self.model.peft_config = self.peft_config + + @property + def active_adapters(self) -> list[str]: + if isinstance(self.active_adapter, str): + return [self.active_adapter] + # is already a list of str + return self.active_adapter + + def forward(self, *args: Any, **kwargs: Any): + return self.model.forward(*args, **kwargs) + + @abstractmethod + def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig: + r""" + A private method to eventually prepare the adapter config. For transformers based models, if + `peft_config.target_modules` is None, we can automatically infer the target modules from the + `TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to + automatically infer it for all tuner models. + + Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example. + + Args: + peft_config (`PeftConfig`): + The adapter config. + model_config (`dict`): + The transformers model config; that config should contain the `model_type` key. + """ + ... + + def _prepare_model(self, peft_config: PeftConfig, model: nn.Module): + r""" + A private method to modify the model structure before the adapter is applied. + + See `peft.tuner.lora.LoraModel._prepare_model` for an example.
+ + Args: + peft_config (`PeftConfig`): + The prepared adapter config. + model (`nn.Module`): + The model that is going to be adapted. + """ + pass + + @abstractmethod + def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool: + r""" + A helper private method to check if the passed module's key name matches any of the target modules in the + `peft_config.target_modules` list. If it does, return `True`, else return `False`. + + Args: + peft_config (`PeftConfig`): + The adapter config. + key (`str`): + The module's key name. + """ + ... + + @abstractmethod + def _create_and_replace( + self, + peft_config: PeftConfig, + adapter_name: str, + target: nn.Module, + target_name: str, + parent: nn.Module, + current_key: str, + ) -> None: + r""" + Inplace replacement of the target module with the adapter layer. This method needs to be overridden by all the + tuner classes. + + Check `peft.tuners.lora.LoraModel._create_and_replace` for an example. + + Args: + peft_config (`PeftConfig`): + The adapter config. + adapter_name (`str`): + The adapter name. + target (`nn.Module`): + The target module. + target_name (`str`): + The target module's name. + parent (`nn.Module`): + The parent module. + current_key (`str`): + The key of the current target being adapted. + """ + ... + + @abstractmethod + def _mark_only_adapters_as_trainable(self, model: nn.Module): + r""" + A helper method to mark only the adapter layers as trainable (i.e. module.requires_grad = False) This needs to + be overridden for all tuner classes to match the correct key names. + + Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example. + """ + ... + + def _check_new_adapter_config(self, config: PeftConfig) -> None: + """ + A helper method to check the config when a new adapter is being added. + + Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. + + """ + pass + + def _check_merge_allowed(self): + """Helper method to check whether the adapter can be merged. + + Raise a ValueError if it is not possible to merge the adapter with the given configuration. + """ + pass + + def inject_adapter(self, model: nn.Module, adapter_name: str): + r""" + Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the + hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed. + + The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class. + + Args: + model (`nn.Module`): + The model to be tuned. + adapter_name (`str`): + The adapter name. + """ + peft_config = self.peft_config[adapter_name] + # Note: If possible, all checks should be performed *at the start of this method*. + # This way, we can raise early if something goes wrong, without leaving the model + # in a bad (half-initialized) state. 
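Putting the abstract contract above together, here is a hedged skeleton of what a new tuner subclass has to provide; `MyTuner` and its `my_tuner_` prefix are hypothetical placeholders, and a real implementation such as `peft.tuners.lora.LoraModel` fills in `_create_and_replace` with an actual adapter layer:

```py
# Hypothetical skeleton of a BaseTuner subclass; not a working adapter.
from torch import nn

from peft.config import PeftConfig
from peft.tuners.tuners_utils import BaseTuner


class MyTuner(BaseTuner):
    prefix: str = "my_tuner_"  # hypothetical parameter-name prefix

    def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig:
        # e.g. derive peft_config.target_modules from model_config["model_type"]
        return peft_config

    @staticmethod
    def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool:
        return any(key.endswith(target_key) for target_key in peft_config.target_modules)

    def _create_and_replace(self, peft_config, adapter_name, target, target_name, parent, current_key):
        # A real tuner would wrap `target` in an adapter layer and
        # setattr(parent, target_name, wrapped) here.
        raise NotImplementedError

    def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
        for name, param in model.named_parameters():
            param.requires_grad = self.prefix in name
```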
+ self._check_new_adapter_config(peft_config) + + _check_for_modules_to_save = getattr(peft_config, "modules_to_save", None) is not None + _has_modules_to_save = False + + model_config = getattr(model, "config", {"model_type": "custom"}) + if hasattr(model_config, "to_dict"): + model_config = model_config.to_dict() + + peft_config = self._prepare_adapter_config(peft_config, model_config) + + self._prepare_model(peft_config, model) + is_target_modules_in_base_model = False + key_list = [key for key, _ in model.named_modules()] + + # update peft_config.target_modules if required + peft_config = _maybe_include_all_linear_layers(peft_config, model) + + for key in key_list: + # Check whether the key matches an entry in modules_to_save + if _check_for_modules_to_save and any( + key.endswith(f"{module_to_save}") for module_to_save in peft_config.modules_to_save + ): + # Optionally set the modules to save + parent, target, target_name = _get_submodules(model, key) + + if not isinstance(target, ModulesToSaveWrapper): + new_module = ModulesToSaveWrapper(target, adapter_name) + setattr(parent, target_name, new_module) + else: + target.update(adapter_name) + + _has_modules_to_save = True + continue + + if not self._check_target_module_exists(peft_config, key): + continue + + self.targeted_module_names.append(key) + is_target_modules_in_base_model = True + parent, target, target_name = _get_submodules(model, key) + self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key) + + if not is_target_modules_in_base_model: + raise ValueError( + f"Target modules {peft_config.target_modules} not found in the base model. " + f"Please check the target modules and try again." + ) + + self._mark_only_adapters_as_trainable(model) + + if self.peft_config[adapter_name].inference_mode: + for n, p in model.named_parameters(): + if adapter_name in n: + p.requires_grad = False + + if _has_modules_to_save: + if not hasattr(model, "modules_to_save"): + model.modules_to_save = set(peft_config.modules_to_save) + else: + model.modules_to_save.update(set(peft_config.modules_to_save)) + + def merge_adapter(self, adapter_names: Optional[list[str]] = None) -> None: + """ + This method merges the adapter layers into the base model. + + Merging adapters can lead to a speed-up of the forward pass. A copy of the adapter weights is still kept in + memory, which is required to unmerge the adapters. In order to merge the adapter weights without keeping them + in memory, please call `merge_and_unload`. + + Args: + adapter_names (`list[str]`, *optional*): + The list of adapter names that should be merged. If `None`, all active adapters will be merged. + Defaults to `None`. + """ + self._check_merge_allowed() + for module in self.model.modules(): + if isinstance(module, BaseTunerLayer): + with onload_layer(module): + module.merge(adapter_names=adapter_names) + + def unmerge_adapter(self): + """ + This method unmerges all merged adapter layers from the base model.
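As a usage sketch of `inject_adapter`, `merge_adapter`, and `unmerge_adapter` (reached through the public `get_peft_model` API), assuming a LoRA adapter on a small causal LM; the `gpt2` checkpoint is just a placeholder:

```py
# Sketch only: merging and unmerging a LoRA adapter on a placeholder model.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder checkpoint
peft_model = get_peft_model(base, LoraConfig(task_type="CAUSAL_LM"))

# peft_model.base_model is the BaseTuner (here a LoraModel); merge_adapter folds
# the adapter weights into the base layers, unmerge_adapter undoes it.
peft_model.base_model.merge_adapter()
peft_model.base_model.unmerge_adapter()
```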
+ """ + for module in self.model.modules(): + if isinstance(module, BaseTunerLayer): + with onload_layer(module): + module.unmerge() + + def _unloading_checks(self, adapter_names: Optional[list[str]]): + adapters_to_consider = adapter_names or self.active_adapters + is_modules_to_save_available = any( + self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider + ) + if is_modules_to_save_available and len(adapters_to_consider) > 1: + raise ValueError("Cannot unload multiple adapters that specify `modules_to_save`.") + + +class BaseTunerLayer(ABC): + r""" + A tuner layer mixin that provides the common methods and attributes for all tuners. + + Args: + is_pluggable (`bool`, *optional*): + Whether the adapter layer can be plugged to any pytorch module + active_adapters (Union[List[`str`], `str`], *optional*): + The name of the active adapter. + """ + + active_adapter = None + + # All names of layers that may contain adapter (trainable) weights + adapter_layer_names: tuple[str] = () + # All names of other parameters that may contain adapter-related parameters + other_param_names: tuple[str] = () + + # indicates whether all adapters should be disabled + _disable_adapters: bool = False + + # the currently active adapter(s) + _active_adapter: str | list[str] = "default" + + # List all merged adapters + merged_adapters: list[str] = [] + + def get_base_layer(self) -> nn.Module: + """ + (Recursively) get the base_layer. + + This is necessary for the case that the tuner layer wraps another tuner layer. + + """ + base_layer = self + while hasattr(base_layer, "base_layer"): + base_layer = base_layer.base_layer + return base_layer + + @property + def weight(self) -> torch.Tensor: + # This is required for some transformers code, e.g. for T5, weight is accessed as: + # self.wo.weight + # where "wo" is the adapter layer. + # https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers + # /models/t5/modeling_t5.py#L292 + base_layer = self.get_base_layer() + if hasattr(base_layer, "qweight"): + # QuantLinear + weight = base_layer.qweight + else: + # Other layers + weight = base_layer.weight + return weight + + @property + def bias(self) -> torch.Tensor: + base_layer = self.get_base_layer() + return base_layer.bias + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + raise NotImplementedError + + def unmerge(self) -> None: + raise NotImplementedError + + @property + def merged(self) -> bool: + return bool(self.merged_adapters) + + @property + def disable_adapters(self) -> bool: + # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method + return self._disable_adapters + + @property + def active_adapter(self) -> str: + # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method + return self._active_adapter + + @property + def active_adapters(self): + if isinstance(self.active_adapter, str): + return [self.active_adapter] + # is already a list of str + return self.active_adapter + + def enable_adapters(self, enabled: bool) -> None: + """Toggle the enabling and disabling of adapters + + Takes care of setting the requires_grad flag for the adapter weights. 
+ + Args: + enabled (bool): True to enable adapters, False to disable adapters + """ + if enabled: + self.set_adapter(self.active_adapters) + self._disable_adapters = False + else: + # disable grads on all adapter layers + for layer_name in self.adapter_layer_names: + layer = getattr(self, layer_name) + layer.requires_grad_(False) + self._disable_adapters = True + + def set_adapter(self, adapter_names: str | list[str]) -> None: + """Set the active adapter(s). + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_names (`str` or `list[str]`): Name of the adapter(s) to be activated. + """ + if isinstance(adapter_names, str): + adapter_names = [adapter_names] + + # Deactivate grads on the inactive adapter and activate grads on the active adapter + for layer_name in self.adapter_layer_names: + module_dict = getattr(self, layer_name) + for key, layer in module_dict.items(): + if key in adapter_names: + # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may + # happen if a completely different adapter layer is being activated. + layer.requires_grad_(True) + else: + layer.requires_grad_(False) + + self._active_adapter = adapter_names + + def _all_available_adapter_names(self) -> list[str]: + """Return a sorted list of all available adapter names""" + adapter_names = set() + for name in self.adapter_layer_names + self.other_param_names: + # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter + # names + attr = getattr(self, name) + if hasattr(attr, "keys"): + adapter_names.update(attr.keys()) + return sorted(adapter_names) + + def delete_adapter(self, adapter_name: str) -> None: + """ + Delete an adapter from the layer. + + This should be called on all adapter layers, or else we will get an inconsistent state. + + This method will also set a new active adapter if the deleted adapter was an active adapter. It is important + that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. + + Args: + adapter_name (`str`): The name of the adapter to delete + + """ + for attr in self.adapter_layer_names + self.other_param_names: + if adapter_name in getattr(self, attr): + del getattr(self, attr)[adapter_name] + + if adapter_name in self.active_adapters: + # choose a new active adapter + active_adapters = self.active_adapters[:] + active_adapters.remove(adapter_name) + if active_adapters: + self.set_adapter(active_adapters) + else: + # no active adapters left, set a new default adapter + # here we get the list of all existing adapter names and choose the first one + remaining_adapters = self._all_available_adapter_names() + if not remaining_adapters: + self.set_adapter([]) + else: + new_active_adapter = remaining_adapters[0] + warnings.warn( + f"Adapter {adapter_name} was active and has now been deleted. Setting active adapter to " + f"{new_active_adapter}." + ) + self.set_adapter(new_active_adapter) + + +def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None: + """A helper method to check if the passed module's key name matches any of the target modules in the adapter_config.
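The layer-level `enable_adapters` above is what the model-level enable/disable toggles ultimately call on each tuner layer. A hedged sketch of such a toggle; the helper name `set_all_adapter_layers` is invented for illustration:

```py
# Hypothetical helper mirroring what model-level enable/disable calls do per layer.
from torch import nn

from peft.tuners.tuners_utils import BaseTunerLayer


def set_all_adapter_layers(model: nn.Module, enabled: bool) -> None:
    for module in model.modules():
        if isinstance(module, BaseTunerLayer):
            module.enable_adapters(enabled)
```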
+ + Args: + config (`LoraConfig` | `LycorisConfig`): The config to match target modules against + key (`str`): The module key to check for a match in the config + + Returns: + `bool` | `re.Match[str]` | `None`: True or a match object if the key matches any of the target modules from the config, False or + None if no match is found + """ + if isinstance(config.target_modules, str): + target_module_found = re.fullmatch(config.target_modules, key) + elif key in config.target_modules: + # this module is specified directly in target_modules + target_module_found = True + else: + target_module_found = any(key.endswith(f".{target_key}") for target_key in config.target_modules) + + layer_indexes = getattr(config, "layers_to_transform", None) + layers_pattern = getattr(config, "layers_pattern", None) + + is_using_layer_indexes = layer_indexes is not None and ( + len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True + ) + if is_using_layer_indexes and target_module_found: + layer_index = None + # TODO: It's still unclear how empty layers_pattern (None, [], or "") should behave + # For now, empty layers_pattern means any layer pattern is ok + if layers_pattern is None or len(layers_pattern) == 0: + layer_index = re.match(r".*\.[^.]*\.(\d+)\.", key) + else: + layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern + for pattern in layers_pattern: + layer_index = re.match(rf".*\.{pattern}\.(\d+)\.", key) + if layer_index is not None: + break + + if layer_index is None: + target_module_found = False + else: + layer_index = int(layer_index.group(1)) + if isinstance(layer_indexes, int): + target_module_found = layer_index == layer_indexes + else: + target_module_found = layer_index in layer_indexes + + return target_module_found + + +def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> dict: + """ + A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter. + """ + config = tuner.peft_config[adapter_name] + key_list = [key for key, _ in tuner.model.named_modules()] + module_dict = {"matched": [], "unmatched": []} + for key in key_list: + if tuner._check_target_module_exists(config, key): + module_dict["matched"].append(key) + else: + module_dict["unmatched"].append(key) + return module_dict + + +def _maybe_include_all_linear_layers(peft_config: PeftConfig, model: nn.Module) -> PeftConfig: + """ + Helper function to update `target_modules` to all linear/Conv1D layers if provided as 'all-linear'. Adapted from + the QLoRA repository: https://github.com/artidoro/qlora/blob/main/qlora.py + """ + + # if `target_modules` is a string, convert to lower case and check if it matches "all-linear" + if not ( + isinstance(peft_config.target_modules, str) + and peft_config.target_modules.lower() == INCLUDE_LINEAR_LAYERS_SHORTHAND + ): + return peft_config + + if not isinstance(model, PreTrainedModel): + raise ValueError( + f"Only instances of PreTrainedModel support `target_modules={INCLUDE_LINEAR_LAYERS_SHORTHAND!r}`" + ) + + linear_classes = (torch.nn.Linear, Conv1D) + + linear_module_names = set() + for name, module in model.named_modules(): + # match with all linear classes.
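To make the matching rules of `check_target_module_exists` above concrete, here is a small sketch using a `LoraConfig`; the Llama-style module keys are only illustrative:

```py
# Sketch only: how target_modules matching behaves for list vs. regex configs.
from peft import LoraConfig
from peft.tuners.tuners_utils import check_target_module_exists

cfg_list = LoraConfig(target_modules=["q_proj", "v_proj"])
print(bool(check_target_module_exists(cfg_list, "model.layers.0.self_attn.q_proj")))  # True
print(bool(check_target_module_exists(cfg_list, "model.layers.0.mlp.up_proj")))       # False

# A string is treated as a regex that must fully match the module key.
cfg_regex = LoraConfig(target_modules=r".*\.self_attn\.(q|v)_proj")
print(bool(check_target_module_exists(cfg_regex, "model.layers.3.self_attn.v_proj")))  # True
```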
+ if isinstance(module, linear_classes): + names = name.rsplit(".", 1)[-1] # get the base name + linear_module_names.add(names) + + # ignore the last classification head for text generation models + output_emb = model.get_output_embeddings() + if output_emb is not None: + last_module_name = [name for name, module in model.named_modules() if module is output_emb][0] + linear_module_names -= {last_module_name} + peft_config.target_modules = linear_module_names + return peft_config + + +def check_adapters_to_merge(module: BaseTunerLayer, adapter_names: Optional[list[str]] = None) -> list[str]: + """ + Helper function to check which adapters should be merged. + + Only return those adapters that are not already merged. Give a warning if some or all of the adapters are already + merged. + + """ + if adapter_names is None: + adapter_names = module.active_adapters + + if module.merged: + merged_adapters = set(module.merged_adapters) + adapter_names = [name for name in adapter_names if name not in merged_adapters] + + if adapter_names: + warnings.warn( + f"The following adapters were already merged: {','.join(module.merged_adapters)}. " + f"You are now additionally merging {','.join(adapter_names)}." + ) + else: + warnings.warn("All adapters are already merged, nothing to do.") + + return adapter_names + + +def clone_module(module: nn.Module, share_weights=False): + """Clone a module in a PyTorch model. + + Clones a module of a model, optionally sharing all the parameters between the original and the clone. Simplifies + reusing a module when manipulating the architecture of a model. + """ + clone = copy.deepcopy(module) + + def _share_weights(src: nn.Module, dst: nn.Module): + for name, param in src.named_parameters(recurse=False): + dst.register_parameter(name, param) + + if share_weights: + for name, submodule in module.named_modules(): + _share_weights(submodule, clone.get_submodule(name)) + + return clone + + +def replicate_layers(model: nn.Module, layer_map: list[tuple[int, int]]): + """Replicate layers in a transformer model with weight sharing. + + This function looks for a module list attribute at model[(.model)*].layers and replicates the layers in the module + list according to the layer map. For example, the map `[[0, 4], [2, 5]]` will take the set of layers `[0, 1, 2, 3, + 4]` and replace them with a module list containing `[0, 1, 2, 3, 2, 3, 4]`. + """ + while hasattr(model, "model"): + model = model.model + # Some variants of the bert model nest the main model under the bert attribute. + if hasattr(model, "bert"): + model = model.bert + + model_type = None + layers: nn.ModuleList = None + if hasattr(model, "layers"): + model_type = "llama" + layers = model.layers + elif hasattr(model, "encoder") and hasattr(model.encoder, "layer"): + model_type = "bert" + layers = model.encoder.layer + elif hasattr(model, "h"): + model_type = "falcon" + layers = model.h + if not model_type or not isinstance(layers, nn.ModuleList): + raise ValueError( + "Could not locate the layers attribute in the model. " + "Expected Llama, BERT or Falcon compatible architectures." + ) + + new_layers = [] + for start, end in layer_map: + for i in range(start, end): + current_idx = len(new_layers) + new_layers.append(clone_module(layers[i], share_weights=True)) + # This is a hack needed to work around the layer_idx introduced in HF transformers.
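The `all-linear` shorthand handled by `_maybe_include_all_linear_layers` above expands `target_modules` to the base names of every `nn.Linear`/`Conv1D` module except the output head. A hedged sketch, using `gpt2` purely as an example checkpoint:

```py
# Sketch only: expanding the "all-linear" shorthand on a placeholder model.
from transformers import AutoModelForCausalLM
from peft import LoraConfig
from peft.tuners.tuners_utils import _maybe_include_all_linear_layers

model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder checkpoint
config = LoraConfig(target_modules="all-linear")
config = _maybe_include_all_linear_layers(config, model)
print(sorted(config.target_modules))  # base names of the linear/Conv1D layers, minus the lm head
```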
+ for submodule in new_layers[-1].modules(): + if hasattr(submodule, "layer_idx"): + submodule.layer_idx = current_idx + layers = nn.ModuleList(new_layers) + if model_type == "llama": + model.layers = layers + elif model_type == "bert": + model.encoder.layer = layers + elif model_type == "falcon": + model.h = layers + else: + raise ValueError("Unexpected model type, need to handle post-processing of layers.") + if hasattr(model.config, "num_hidden_layers"): # Common to Llama, Bert, Falcon. + model.config.num_hidden_layers = len(new_layers) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/constants.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed92502f85cdae457bacd3081ad302722ac829c7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/utils/__pycache__/constants.cpython-310.pyc differ
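Finally, a hedged usage sketch of `replicate_layers`, which relies on `clone_module` with weight sharing; the tiny Llama checkpoint name is a placeholder for any model exposing `model.model.layers`:

```py
# Sketch only: depth-expanding a small Llama-style model with shared weights.
from transformers import AutoModelForCausalLM
from peft.tuners.tuners_utils import replicate_layers

model_name = "hf-internal-testing/tiny-random-LlamaForCausalLM"  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained(model_name)

n = model.config.num_hidden_layers
# Keep layers [0, n), then append weight-shared copies of the second half.
replicate_layers(model, [(0, n), (n // 2, n)])
print(model.config.num_hidden_layers)  # grown by n - n // 2; the new blocks share weights
```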