applied-ai-018 committed
Commit 175ee62 · verified · 1 Parent(s): 39ccdaf

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/auto.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/config.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/helpers.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/import_utils.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mapping.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mixed_model.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/peft/__pycache__/peft_model.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/peft/tuners/__init__.py +32 -0
  10. llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py +37 -0
  14. llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py +145 -0
  15. llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/config.py +52 -0
  16. llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/layer.py +347 -0
  17. llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/model.py +346 -0
  18. llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/model.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/model.py +394 -0
  24. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__init__.py +20 -0
  25. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/config.py +121 -0
  30. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/layer.py +375 -0
  31. llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/model.py +114 -0
  32. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py +100 -0
  34. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/awq.py +108 -0
  35. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/bnb.py +508 -0
  36. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/config.py +299 -0
  37. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/layer.py +1066 -0
  38. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/model.py +793 -0
  39. llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/tp_layer.py +230 -0
  40. llmeval-env/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py +428 -0
  41. llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__init__.py +18 -0
  42. llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/model.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/model.py +339 -0
  45. llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py +19 -0
  46. llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py +61 -0
  50. llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py +115 -0
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/auto.cpython-310.pyc ADDED
Binary file (4.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/config.cpython-310.pyc ADDED
Binary file (8.79 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (4.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/import_utils.cpython-310.pyc ADDED
Binary file (1.93 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mapping.cpython-310.pyc ADDED
Binary file (4.69 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/mixed_model.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/__pycache__/peft_model.cpython-310.pyc ADDED
Binary file (53.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/__init__.py ADDED
@@ -0,0 +1,32 @@
+ # flake8: noqa
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
+ # module, but to preserve other warnings. So, don't check this module at all
+
+ # coding=utf-8
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel
+ from .lora import LoraConfig, LoraModel, LoftQConfig
+ from .loha import LoHaConfig, LoHaModel
+ from .lokr import LoKrConfig, LoKrModel
+ from .ia3 import IA3Config, IA3Model
+ from .adalora import AdaLoraConfig, AdaLoraModel
+ from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType
+ from .prefix_tuning import PrefixEncoder, PrefixTuningConfig
+ from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit
+ from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit
+ from .oft import OFTConfig, OFTModel
+ from .mixed import MixedModel
+ from .poly import PolyConfig, PolyModel
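The tuner config/model pairs re-exported above are normally consumed through `get_peft_model` rather than by instantiating the model classes directly. A minimal usage sketch, assuming the `t5-base` checkpoint and the `["q", "v"]` target modules used in the docstring examples later in this diff:

```python
# Minimal sketch: wrapping a base model with one of the tuner configs re-exported above.
# The checkpoint name and target modules are illustrative assumptions, not part of this commit.
from transformers import AutoModelForSeq2SeqLM
from peft import LoraConfig, get_peft_model

base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
lora_config = LoraConfig(task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.01)
peft_model = get_peft_model(base_model, lora_config)
peft_model.print_trainable_parameters()  # only the adapter parameters remain trainable
```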
llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc ADDED
Binary file (25.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py ADDED
@@ -0,0 +1,37 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
+
+ from .config import AdaLoraConfig
+ from .gptq import SVDQuantLinear
+ from .layer import AdaLoraLayer, RankAllocator, SVDLinear
+ from .model import AdaLoraModel
+
+
+ __all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"]
+
+
+ def __getattr__(name):
+     if (name == "SVDLinear8bitLt") and is_bnb_available():
+         from .bnb import SVDLinear8bitLt
+
+         return SVDLinear8bitLt
+
+     if (name == "SVDLinear4bit") and is_bnb_4bit_available():
+         from .bnb import SVDLinear4bit
+
+         return SVDLinear4bit
+
+     raise AttributeError(f"module {__name__} has no attribute {name}")
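The module-level `__getattr__` above is the PEP 562 lazy-export pattern: the bitsandbytes-backed classes are only imported on first access, so the submodule can be imported without `bitsandbytes` installed. A stripped-down sketch of the same pattern, with a hypothetical `heavy_dep` package and `FastLinear` class standing in for bitsandbytes and `SVDLinear8bitLt`:

```python
# lazy_exports.py -- sketch of the lazy, optional-dependency export pattern used above.
# "heavy_dep" and "FastLinear" are hypothetical stand-ins, not real packages.
import importlib.util


def _heavy_dep_available() -> bool:
    # Cheap availability check that does not import the optional package.
    return importlib.util.find_spec("heavy_dep") is not None


def __getattr__(name):
    # Called only for attributes not found by the normal module lookup (PEP 562).
    if name == "FastLinear" and _heavy_dep_available():
        from heavy_dep import FastLinear  # deferred import on first access

        return FastLinear
    raise AttributeError(f"module {__name__} has no attribute {name}")
```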
llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py ADDED
@@ -0,0 +1,145 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Any
+
+ import torch
+
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
+
+ from .layer import AdaLoraLayer
+
+
+ if is_bnb_available():
+
+     class SVDLinear8bitLt(torch.nn.Module, AdaLoraLayer):
+         # Low-rank matrix for SVD-based adaptation
+         def __init__(
+             self,
+             base_layer: torch.nn.Module,
+             adapter_name: str,
+             r: int = 0,
+             lora_alpha: int = 1,
+             lora_dropout: float = 0.0,
+             init_lora_weights: bool = True,
+             **kwargs,
+         ) -> None:
+             super().__init__()
+             AdaLoraLayer.__init__(self, base_layer)
+             # Freezing the pre-trained weight matrix
+             self.get_base_layer().weight.requires_grad = False
+
+             self._active_adapter = adapter_name
+             self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
+
+         def forward(self, x: torch.Tensor) -> torch.Tensor:
+             # note: no check for self.merged because merging is not supported (yet)
+             result = self.base_layer(x)
+
+             if self.disable_adapters:
+                 return result
+
+             for active_adapter in self.active_adapters:
+                 if active_adapter not in self.lora_A.keys():
+                     continue
+                 requires_conversion = not torch.is_autocast_enabled()
+                 if requires_conversion:
+                     expected_dtype = result.dtype
+                     if x.dtype != torch.float32:
+                         x = x.float()
+
+                 lora_A = self.lora_A[active_adapter]
+                 lora_B = self.lora_B[active_adapter]
+                 lora_E = self.lora_E[active_adapter]
+                 dropout = self.lora_dropout[active_adapter]
+                 scaling = self.scaling[active_adapter]
+                 ranknum = self.ranknum[active_adapter] + 1e-5
+
+                 output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
+                 if requires_conversion:
+                     output = output.to(expected_dtype)
+                 output = output * scaling / ranknum
+                 # inplace operation on view is forbidden for MatMul8bitLtBackward, so avoid it
+                 result = result + output
+             return result
+
+         def __repr__(self) -> str:
+             rep = super().__repr__()
+             return "adalora." + rep
+
+
+ if is_bnb_4bit_available():
+
+     class SVDLinear4bit(torch.nn.Module, AdaLoraLayer):
+         # Low-rank matrix for SVD-based adaptation
+         def __init__(
+             self,
+             base_layer: torch.nn.Module,
+             adapter_name: str,
+             r: int = 0,
+             lora_alpha: int = 1,
+             lora_dropout: float = 0.0,
+             init_lora_weights: bool = True,
+             **kwargs,
+         ) -> None:
+             super().__init__()
+             AdaLoraLayer.__init__(self, base_layer)
+             # Freezing the pre-trained weight matrix
+             self.get_base_layer().weight.requires_grad = False
+
+             self._active_adapter = adapter_name
+             self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
+
+         def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
+             # note: no check for self.merged because merging is not supported (yet)
+             result = self.base_layer(x, *args, **kwargs)
+
+             if self.disable_adapters:
+                 return result
+
+             # As per Tim Dettmers, for 4bit, we need to defensively clone here.
+             # The reason is that in some cases, an error can occur that backprop
+             # does not work on a manipulated view. This issue may be solved with
+             # newer PyTorch versions but this would need extensive testing to be
+             # sure.
+             result = result.clone()
+
+             for active_adapter in self.active_adapters:
+                 if active_adapter not in self.lora_A.keys():
+                     continue
+
+                 lora_A = self.lora_A[active_adapter]
+                 lora_B = self.lora_B[active_adapter]
+                 lora_E = self.lora_E[active_adapter]
+                 dropout = self.lora_dropout[active_adapter]
+                 scaling = self.scaling[active_adapter]
+                 ranknum = self.ranknum[active_adapter] + 1e-5
+
+                 requires_conversion = not torch.is_autocast_enabled()
+                 if requires_conversion:
+                     expected_dtype = result.dtype
+                     compute_dtype = lora_A.dtype
+                     if x.dtype != compute_dtype:
+                         x = x.to(compute_dtype)
+
+                 output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
+                 if requires_conversion:
+                     output = output.to(expected_dtype)
+                 output = output * scaling / ranknum
+                 result += output
+             return result
+
+         def __repr__(self) -> str:
+             rep = super().__repr__()
+             return "adalora." + rep
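Both quantized variants follow the same dtype discipline: when autocast is off, cast the input to the adapter's compute dtype, run the low-rank update, cast the update back to the dtype of the base output, and only then add it (out of place for the 8-bit path). A small standalone sketch of that update, with a plain `nn.Linear` standing in for the quantized base layer (an assumption for illustration only):

```python
# Sketch of the adapter update pattern used in SVDLinear8bitLt/SVDLinear4bit.forward above.
import torch

def adalora_delta(x, result, lora_A, lora_B, lora_E, dropout, scaling, ranknum):
    requires_conversion = not torch.is_autocast_enabled()
    if requires_conversion:
        expected_dtype = result.dtype
        x = x.to(lora_A.dtype)                  # match the adapter compute dtype
    output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
    if requires_conversion:
        output = output.to(expected_dtype)      # cast back before adding to the base output
    return result + output * scaling / ranknum  # out-of-place add, as in the 8-bit path

base = torch.nn.Linear(16, 8)   # stand-in for the quantized base layer
r = 4
lora_A = torch.randn(r, 16)     # right singular vectors (r, in_features)
lora_E = torch.randn(r, 1)      # singular values        (r, 1)
lora_B = torch.randn(8, r)      # left singular vectors  (out_features, r)
x = torch.randn(2, 16)
y = adalora_delta(x, base(x), lora_A, lora_B, lora_E, torch.nn.Identity(), 1.0, r + 1e-5)
print(y.shape)  # torch.Size([2, 8])
```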
llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/config.py ADDED
@@ -0,0 +1,52 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from dataclasses import dataclass, field
+ from typing import Optional
+
+ from peft.tuners.lora import LoraConfig
+ from peft.utils import PeftType
+
+
+ @dataclass
+ class AdaLoraConfig(LoraConfig):
+     """
+     This is the configuration class to store the configuration of a [`~peft.AdaLora`].
+
+     Args:
+         target_r (`int`): The target average rank of incremental matrix.
+         init_r (`int`): The initial rank for each incremental matrix.
+         tinit (`int`): The steps of initial fine-tuning warmup.
+         tfinal (`int`): The steps of final fine-tuning.
+         deltaT (`int`): The time interval between two budget allocations.
+         beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.
+         beta2 (`float`): The hyperparameter of EMA for uncertainty quantification.
+         orth_reg_weight (`float`): The coefficient of orthogonal regularization.
+         total_step (`int`): The total training steps that should be specified before training.
+         rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.
+     """
+
+     target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."})
+     init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."})
+     tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."})
+     tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."})
+     deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."})
+     beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
+     beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
+     orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."})
+     total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."})
+     rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."})
+
+     def __post_init__(self):
+         self.peft_type = PeftType.ADALORA
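Taken together, the fields above define the rank schedule: every targeted matrix starts at rank `init_r`, the allocator re-budgets every `deltaT` steps after a `tinit`-step warmup, and the last `tfinal` steps run at the final budget implied by `target_r`. A hedged instantiation example (the hyperparameter values and target modules are illustrative choices, not defaults prescribed by this file):

```python
# Illustrative AdaLoraConfig; values are example choices, not mandated by the source.
from peft import AdaLoraConfig

config = AdaLoraConfig(
    init_r=12,            # initial rank of every incremental matrix
    target_r=8,           # target average rank after budget reallocation
    tinit=200,            # warmup steps before any rank is pruned
    tfinal=500,           # final fine-tuning steps at the target budget
    deltaT=10,            # reallocate the budget every 10 steps
    total_step=3000,      # must be set before training starts
    beta1=0.85,           # EMA coefficient for sensitivity smoothing
    beta2=0.85,           # EMA coefficient for uncertainty quantification
    orth_reg_weight=0.5,  # weight of the orthogonal regularization term
    target_modules=["q", "v"],
    lora_alpha=32,
    lora_dropout=0.01,
    task_type="SEQ_2_SEQ_LM",
)
```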
llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/layer.py ADDED
@@ -0,0 +1,347 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import warnings
+ from typing import Any, List, Optional
+
+ import torch
+ from torch import nn
+
+ from peft.tuners.lora import LoraLayer
+ from peft.tuners.tuners_utils import check_adapters_to_merge
+ from peft.utils import transpose
+
+
+ class AdaLoraLayer(LoraLayer):
+     # List all names of layers that may contain adapter weights
+     # Note: ranknum doesn't need to be included as it is not an nn.Module
+     adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B")
+     # other_param_names is defined in LoraLayer
+
+     def __init__(self, base_layer: nn.Module) -> None:
+         super().__init__(base_layer)
+         self.lora_E = nn.ParameterDict({})
+         self.lora_A = nn.ParameterDict({})
+         self.lora_B = nn.ParameterDict({})
+         self.ranknum = nn.ParameterDict({})
+
+     def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
+         if r < 0:
+             # note: r == 0 is allowed for AdaLora, see #1539
+             raise ValueError(f"`r` should be a positive integer or 0, but the value passed is {r}")
+
+         self.r[adapter_name] = r
+         self.lora_alpha[adapter_name] = lora_alpha
+         if lora_dropout > 0.0:
+             lora_dropout_layer = nn.Dropout(p=lora_dropout)
+         else:
+             lora_dropout_layer = nn.Identity()
+
+         self.lora_dropout[adapter_name] = lora_dropout_layer
+         # Actual trainable parameters
+         # Right singular vectors
+         self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features))
+         # Singular values
+         self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1))
+         # Left singular vectors
+         self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r))
+         # The current rank
+         self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False)
+         self.ranknum[adapter_name].data.fill_(float(r))
+         self.ranknum[adapter_name].requires_grad = False
+         self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
+         if init_lora_weights:
+             self.reset_lora_parameters(adapter_name)
+
+         if hasattr(self.get_base_layer(), "qweight"):
+             # QuantLinear
+             self.to(self.get_base_layer().qweight.device)
+         else:
+             self.to(self.get_base_layer().weight.device)
+         self.set_adapter(self.active_adapters)
+
+     def reset_lora_parameters(self, adapter_name):
+         if adapter_name in self.lora_A.keys():
+             nn.init.normal_(self.lora_E[adapter_name], mean=0.0, std=0.02)
+             nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
+             nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
+
+
+ class SVDLinear(nn.Module, AdaLoraLayer):
+     # SVD-based adaptation by a dense layer
+     def __init__(
+         self,
+         base_layer: nn.Module,
+         adapter_name: str,
+         r: int = 0,
+         lora_alpha: int = 1,
+         lora_dropout: float = 0.0,
+         fan_in_fan_out: bool = False,
+         init_lora_weights: bool = True,
+         **kwargs,
+     ) -> None:
+         super().__init__()
+         AdaLoraLayer.__init__(self, base_layer)
+         # Freezing the pre-trained weight matrix
+         self.get_base_layer().weight.requires_grad = False
+
+         self.fan_in_fan_out = fan_in_fan_out
+         self._active_adapter = adapter_name
+         self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
+
+     def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
+         """
+         Merge the active adapter weights into the base weights
+
+         Args:
+             safe_merge (`bool`, *optional*):
+                 If True, the merge operation will be performed in a copy of the original weights and check for NaNs
+                 before merging the weights. This is useful if you want to check if the merge operation will produce
+                 NaNs. Defaults to `False`.
+             adapter_names (`List[str]`, *optional*):
+                 The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
+                 to `None`.
+         """
+         adapter_names = check_adapters_to_merge(self, adapter_names)
+         if not adapter_names:
+             # no adapter to merge
+             return
+
+         for active_adapter in adapter_names:
+             base_layer = self.get_base_layer()
+             if active_adapter in self.lora_A.keys():
+                 if safe_merge:
+                     # Note that safe_merge will be slower than the normal merge
+                     # because of the copy operation.
+                     orig_weights = base_layer.weight.data.clone()
+                     orig_weights += self.get_delta_weight(active_adapter)
+
+                     if not torch.isfinite(orig_weights).all():
+                         raise ValueError(
+                             f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
+                         )
+
+                     base_layer.weight.data = orig_weights
+                 else:
+                     base_layer.weight.data += self.get_delta_weight(active_adapter)
+                 self.merged_adapters.append(active_adapter)
+
+     def unmerge(self) -> None:
+         """
+         This method unmerges all merged adapter layers from the base weights.
+         """
+         if not self.merged:
+             warnings.warn("Already unmerged. Nothing to do.")
+             return
+         while len(self.merged_adapters) > 0:
+             active_adapter = self.merged_adapters.pop()
+             if active_adapter in self.lora_A.keys():
+                 self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
+
+     def get_delta_weight(self, adapter) -> torch.Tensor:
+         return (
+             transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out)
+             * self.scaling[adapter]
+             / (self.ranknum[adapter] + 1e-5)
+         )
+
+     def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
+         if self.disable_adapters:
+             if self.merged:
+                 self.unmerge()
+             result = self.base_layer(x, *args, **kwargs)
+         elif self.merged:
+             result = self.base_layer(x, *args, **kwargs)
+         else:
+             result = self.base_layer(x, *args, **kwargs)
+             for active_adapter in self.active_adapters:
+                 if active_adapter not in self.lora_A.keys():
+                     continue
+                 lora_A = self.lora_A[active_adapter]
+                 lora_B = self.lora_B[active_adapter]
+                 lora_E = self.lora_E[active_adapter]
+                 dropout = self.lora_dropout[active_adapter]
+                 scaling = self.scaling[active_adapter]
+                 ranknum = self.ranknum[active_adapter] + 1e-5
+
+                 x = x.to(lora_A.dtype)
+                 result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
+
+         return result
+
+     def __repr__(self) -> str:
+         rep = super().__repr__()
+         return "adalora." + rep
+
+
+ class RankAllocator:
+     """
+     The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY
+
+     Args:
+         config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
+         model: the model that we apply AdaLoRA to.
+
+     """
+
+     def __init__(self, model, peft_config, adapter_name):
+         self.peft_config = peft_config
+         self.adapter_name = adapter_name
+         self.beta1 = peft_config.beta1
+         self.beta2 = peft_config.beta2
+         assert self.beta1 > 0 and self.beta1 < 1
+         assert self.beta2 > 0 and self.beta2 < 1
+
+         self.reset_ipt()
+         self._set_budget_scheduler(model)
+
+     def set_total_step(self, total_step):
+         self.peft_config.total_step = total_step
+
+     def reset_ipt(self):
+         self.ipt = {}
+         self.exp_avg_ipt = {}
+         self.exp_avg_unc = {}
+
+     def _set_budget_scheduler(self, model):
+         self.init_bgt = 0
+         self.name_set = set()
+         for n, p in model.named_parameters():
+             if f"lora_A.{self.adapter_name}" in n:
+                 self.init_bgt += p.size(0)
+                 self.name_set.add(n.replace("lora_A", "%s"))
+         self.name_set = sorted(self.name_set)
+         # The total final rank budget
+         self.target_bgt = self.peft_config.target_r * len(self.name_set)
+
+     def budget_schedule(self, step: int):
+         tinit = self.peft_config.tinit
+         tfinal = self.peft_config.tfinal
+         total_step = self.peft_config.total_step
+         # Initial warmup
+         if step <= tinit:
+             budget = self.init_bgt
+             mask_ind = False
+         # Final fine-tuning
+         elif step > total_step - tfinal:
+             budget = self.target_bgt
+             mask_ind = True
+         else:
+             # Budget decreasing with a cubic scheduler
+             mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
+             budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt)
+             mask_ind = True if step % self.peft_config.deltaT == 0 else False
+         return budget, mask_ind
+
+     def update_ipt(self, model):
+         # Update the sensitivity and uncertainty for every weight
+         for n, p in model.named_parameters():
+             if "lora_" in n and self.adapter_name in n:
+                 if n not in self.ipt:
+                     self.ipt[n] = torch.zeros_like(p)
+                     self.exp_avg_ipt[n] = torch.zeros_like(p)
+                     self.exp_avg_unc[n] = torch.zeros_like(p)
+                 with torch.no_grad():
+                     self.ipt[n] = (p * p.grad).abs().detach()
+                     # Sensitivity smoothing
+                     self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n]
+                     # Uncertainty quantification
+                     self.exp_avg_unc[n] = (
+                         self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs()
+                     )
+
+     def _element_score(self, n):
+         return self.exp_avg_ipt[n] * self.exp_avg_unc[n]
+
+     def _combine_ipt(self, ipt_E, ipt_AB):
+         ipt_AB = ipt_AB.sum(dim=1, keepdim=False)
+         sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1)
+         return sum_ipt
+
+     def mask_to_budget(self, model, budget):
+         value_ipt = {}
+         vector_ipt = {}
+         triplet_ipt = {}
+         # Get the importance score for A, E, B
+         for n, p in model.named_parameters():
+             if f"lora_A.{self.adapter_name}" in n:
+                 entry_ipt = self._element_score(n)
+                 comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True)
+                 name_m = n.replace("lora_A", "%s")
+                 if name_m not in vector_ipt:
+                     vector_ipt[name_m] = [comb_ipt]
+                 else:
+                     vector_ipt[name_m].append(comb_ipt)
+             if f"lora_B.{self.adapter_name}" in n:
+                 entry_ipt = self._element_score(n)
+                 comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1)
+                 name_m = n.replace("lora_B", "%s")
+                 if name_m not in vector_ipt:
+                     vector_ipt[name_m] = [comb_ipt]
+                 else:
+                     vector_ipt[name_m].append(comb_ipt)
+             if f"lora_E.{self.adapter_name}" in n:
+                 entry_ipt = self._element_score(n)
+                 name_m = n.replace("lora_E", "%s")
+                 value_ipt[name_m] = entry_ipt
+
+         all_score = []
+         # Calculate the score for each triplet
+         for name_m in vector_ipt:
+             ipt_E = value_ipt[name_m]
+             ipt_AB = torch.cat(vector_ipt[name_m], dim=1)
+             sum_ipt = self._combine_ipt(ipt_E, ipt_AB)
+             name_E = name_m % "lora_E"
+             triplet_ipt[name_E] = sum_ipt.view(-1, 1)
+             all_score.append(sum_ipt.view(-1))
+
+         # Get the threshold by ranking ipt
+         mask_threshold = torch.kthvalue(
+             torch.cat(all_score),
+             k=self.init_bgt - budget,
+         )[0].item()
+
+         rank_pattern = {}
+         # Mask the unimportant triplets
+         with torch.no_grad():
+             for n, p in model.named_parameters():
+                 if f"lora_E.{self.adapter_name}" in n:
+                     p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0)
+                     rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist()
+         return rank_pattern
+
+     def update_and_allocate(self, model, global_step, force_mask=False):
+         # # Update the importance score and allocate the budget
+         if global_step < self.peft_config.total_step - self.peft_config.tfinal:
+             self.update_ipt(model)
+         budget, mask_ind = self.budget_schedule(global_step)
+         # Allocate the budget according to importance scores
+         if mask_ind or force_mask:
+             rank_pattern = self.mask_to_budget(model, budget)
+         else:
+             rank_pattern = None
+         return budget, rank_pattern
+
+     def mask_using_rank_pattern(self, model, rank_pattern):
+         # Mask the unimportant triplets
+         is_adapter_name_truncated = False
+         if self.adapter_name not in next(iter(rank_pattern.keys())):
+             is_adapter_name_truncated = True
+
+         with torch.no_grad():
+             for n, p in model.named_parameters():
+                 if f"lora_E.{self.adapter_name}" in n:
+                     key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "")
+                     mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device)
+                     p.masked_fill_(~mask.bool(), 0.0)
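`SVDLinear.get_delta_weight` and `SVDLinear.forward` are two views of the same update: the merged delta `lora_B @ (lora_A * lora_E) * scaling / ranknum` applied to `x` equals the factored product computed in the forward pass. A small numerical check of that equivalence (shapes follow `AdaLoraLayer.update_layer` above; `fan_in_fan_out=False` is assumed):

```python
# Check: merging the SVD-parametrized delta weight reproduces the factored forward computation.
import torch

in_features, out_features, r = 6, 4, 3
lora_A = torch.randn(r, in_features)    # right singular vectors
lora_E = torch.randn(r, 1)              # singular values
lora_B = torch.randn(out_features, r)   # left singular vectors
scaling, ranknum = 1.0, r + 1e-5

delta_w = lora_B @ (lora_A * lora_E) * scaling / ranknum   # (out_features, in_features)

x = torch.randn(2, in_features)
via_forward = (x @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
via_merged = x @ delta_w.T

print(torch.allclose(via_forward, via_merged, atol=1e-6))  # True
```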
llmeval-env/lib/python3.10/site-packages/peft/tuners/adalora/model.py ADDED
@@ -0,0 +1,346 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import warnings
+
+ import torch
+ from transformers.pytorch_utils import Conv1D
+
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
+ from peft.tuners.lora import LoraConfig, LoraModel
+ from peft.tuners.tuners_utils import BaseTunerLayer
+ from peft.utils import (
+     TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
+     _freeze_adapter,
+     _get_submodules,
+     get_auto_gptq_quant_linear,
+     get_quantization_config,
+ )
+
+ from .gptq import SVDQuantLinear
+ from .layer import AdaLoraLayer, RankAllocator, SVDLinear
+
+
+ class AdaLoraModel(LoraModel):
+     """
+     Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
+     https://openreview.net/forum?id=lq62uWRJjiY
+
+     Args:
+         model ([`transformers.PreTrainedModel`]): The model to be adapted.
+         config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
+         adapter_name (`str`): The name of the adapter, defaults to `"default"`.
+
+     Returns:
+         `torch.nn.Module`: The AdaLora model.
+
+     Example::
+
+         >>> from transformers import AutoModelForSeq2SeqLM
+         >>> from peft import AdaLoraModel, AdaLoraConfig
+         >>> config = AdaLoraConfig(
+         ...     peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
+         ...     lora_dropout=0.01,
+         ... )
+         >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+         >>> model = AdaLoraModel(model, config, "default")
+
+     **Attributes**:
+         - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
+         - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
+     """
+
+     # Note: don't redefine prefix here, it should be inherited from LoraModel
+
+     def __init__(self, model, config, adapter_name):
+         super().__init__(model, config, adapter_name)
+
+         traininable_mode_counter = 0
+         for config in self.peft_config.values():
+             if not config.inference_mode:
+                 traininable_mode_counter += 1
+
+         if traininable_mode_counter > 1:
+             raise ValueError(
+                 "AdaLoraModel supports only 1 trainable adapter. "
+                 "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
+             )
+
+         if self.peft_config[adapter_name].inference_mode:
+             _freeze_adapter(self.model, adapter_name)
+         else:
+             self.trainable_adapter_name = adapter_name
+             self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
+
+     def _check_new_adapter_config(self, config: LoraConfig) -> None:
+         """
+         A helper method to check the config when a new adapter is being added.
+
+         Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
+
+         """
+         super()._check_new_adapter_config(config)
+
+         traininable_mode_counter = 0
+         for config_ in self.peft_config.values():
+             if not config_.inference_mode:
+                 traininable_mode_counter += 1
+
+         if traininable_mode_counter > 1:
+             raise ValueError(
+                 f"{self.__class__.__name__} supports only 1 trainable adapter. "
+                 "When using multiple adapters, set inference_mode to True for all adapters except the one "
+                 "you want to train."
+             )
+
+     def _create_and_replace(
+         self,
+         lora_config,
+         adapter_name,
+         target,
+         target_name,
+         parent,
+         current_key,
+     ):
+         kwargs = {
+             "r": lora_config.init_r,
+             "lora_alpha": lora_config.lora_alpha,
+             "lora_dropout": lora_config.lora_dropout,
+             "fan_in_fan_out": lora_config.fan_in_fan_out,
+             "init_lora_weights": lora_config.init_lora_weights,
+             "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
+             "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
+         }
+         if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available():
+             raise ImportError(
+                 "To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. "
+                 "You can install it with `pip install bitsandbytes`."
+             )
+
+         quantization_config = get_quantization_config(self.model, method="gptq")
+         if quantization_config is not None:
+             kwargs["gptq_quantization_config"] = quantization_config
+
+         # If it is not an AdaLoraLayer, create a new module, else update it with new adapters
+         if not isinstance(target, AdaLoraLayer):
+             new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
+             if adapter_name != self.active_adapter:
+                 # adding an additional adapter: it is not automatically trainable
+                 new_module.requires_grad_(False)
+             self._replace_module(parent, target_name, new_module, target)
+         else:
+             target.update_layer(
+                 adapter_name,
+                 lora_config.init_r,
+                 lora_config.lora_alpha,
+                 lora_config.lora_dropout,
+                 lora_config.init_lora_weights,
+             )
+
+     @staticmethod
+     def _create_new_module(lora_config, adapter_name, target, **kwargs):
+         # avoid eager bnb import
+         if is_bnb_available():
+             import bitsandbytes as bnb
+
+             from .bnb import SVDLinear8bitLt
+         if is_bnb_4bit_available():
+             from .bnb import SVDLinear4bit
+
+         gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
+         AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
+
+         loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
+         loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
+
+         if isinstance(target, BaseTunerLayer):
+             target_base_layer = target.get_base_layer()
+         else:
+             target_base_layer = target
+
+         if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
+             kwargs.update(
+                 {
+                     "has_fp16_weights": target_base_layer.state.has_fp16_weights,
+                     "memory_efficient_backward": target_base_layer.state.memory_efficient_backward,
+                     "threshold": target_base_layer.state.threshold,
+                     "index": target_base_layer.index,
+                 }
+             )
+             new_module = SVDLinear8bitLt(target, adapter_name, **kwargs)
+         elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
+             fourbit_kwargs = kwargs.copy()
+             fourbit_kwargs.update(
+                 {
+                     "compute_dtype": target_base_layer.compute_dtype,
+                     "compress_statistics": target_base_layer.weight.compress_statistics,
+                     "quant_type": target_base_layer.weight.quant_type,
+                 }
+             )
+             new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs)
+         elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear):
+             new_module = SVDQuantLinear(target, adapter_name, **kwargs)
+         else:
+             if isinstance(target_base_layer, torch.nn.Linear):
+                 if kwargs["fan_in_fan_out"]:
+                     warnings.warn(
+                         "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
+                         "Setting fan_in_fan_out to False."
+                     )
+                     kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
+             elif isinstance(target_base_layer, Conv1D):
+                 if not kwargs["fan_in_fan_out"]:
+                     warnings.warn(
+                         "fan_in_fan_out is set to False but the target module is `Conv1D`. "
+                         "Setting fan_in_fan_out to True."
+                     )
+                     kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
+             else:
+                 raise ValueError(
+                     f"Target module {target} is not supported. "
+                     f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
+                 )
+             new_module = SVDLinear(target, adapter_name, **kwargs)
+
+         return new_module
+
+     @staticmethod
+     def _prepare_adapter_config(peft_config, model_config):
+         if peft_config.target_modules is None:
+             if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
+                 raise ValueError("Please specify `target_modules` in `peft_config`")
+             peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
+                 model_config["model_type"]
+             ]
+         return peft_config
+
+     def __getattr__(self, name: str):
+         """Forward missing attributes to the wrapped module."""
+         try:
+             return super().__getattr__(name)  # defer to nn.Module's logic
+         except AttributeError:
+             return getattr(self.model, name)
+
+     def forward(self, *args, **kwargs):
+         outputs = self.model.forward(*args, **kwargs)
+
+         if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor):
+             # Calculate the orthogonal regularization
+             orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
+
+             if orth_reg_weight <= 0:
+                 raise ValueError("orth_reg_weight should be greater than 0. ")
+
+             regu_loss = 0
+             num_param = 0
+             for n, p in self.model.named_parameters():
+                 if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
+                     para_cov = p @ p.T if "lora_A" in n else p.T @ p
+                     I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))  # noqa: E741
+                     I.requires_grad = False
+                     num_param += 1
+                     regu_loss += torch.norm(para_cov - I, p="fro")
+             if num_param > 0:
+                 regu_loss = regu_loss / num_param
+             else:
+                 regu_loss = 0
+             outputs.loss += orth_reg_weight * regu_loss
+         return outputs
+
+     def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
+         lora_config = self.peft_config[adapter_name]
+         for name, rank_idx in rank_pattern.items():
+             if isinstance(rank_idx, list):
+                 rank = sum(rank_idx)
+             elif isinstance(rank_idx, torch.Tensor):
+                 rank_idx = rank_idx.view(-1)
+                 rank = rank_idx.sum().item()
+             else:
+                 raise ValueError("Unexpected type of rank_idx")
+             key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
+             _, target, _ = _get_submodules(self.model, key)
+             lora_E_weights = target.lora_E[adapter_name][rank_idx]
+             lora_A_weights = target.lora_A[adapter_name][rank_idx]
+             lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
+             ranknum = target.ranknum[adapter_name]
+             target.update_layer(
+                 adapter_name,
+                 rank,
+                 lora_config.lora_alpha,
+                 lora_config.lora_dropout,
+                 lora_config.init_lora_weights,
+             )
+             with torch.no_grad():
+                 if rank > 0:
+                     target.lora_E[adapter_name].copy_(lora_E_weights)
+                     target.lora_A[adapter_name].copy_(lora_A_weights)
+                     target.lora_B[adapter_name].copy_(lora_B_weights)
+                     # The scaling is exactly as the previous
+                     target.ranknum[adapter_name].copy_(ranknum)
+
+     def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
+         for name, rank_idx in rank_pattern.items():
+             rank = sum(rank_idx)
+             prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
+             for layer in ["lora_E", "lora_A", "lora_B"]:
+                 key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
+                 if layer != "lora_B":
+                     state_dict[key] = (
+                         state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
+                     )
+                 else:
+                     state_dict[key] = (
+                         state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
+                     )
+         return state_dict
+
+     def update_and_allocate(self, global_step):
+         """
+         This method updates Adalora budget and mask.
+
+         This should be called in every training step after `loss.backward()` and before `zero_grad()`.
+
+         `tinit`, `tfinal` and `deltaT` are handled within the method.
+
+         Args:
+             global_step (`int`): The current training step, it is used to calculate adalora budget.
+
+         Example:
+
+         ```python
+         >>> loss = model(**input).loss
+         >>> loss.backward()
+         >>> optimizer.step()
+         >>> model.base_model.update_and_allocate(i_step)
+         >>> optimizer.zero_grad()
+         ```
+         """
+         lora_config = self.peft_config[self.trainable_adapter_name]
+         # Update the importance score and allocate the budget
+         if global_step < lora_config.total_step - lora_config.tfinal:
+             _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
+             if rank_pattern:
+                 lora_config.rank_pattern = rank_pattern
+         # Finalize the budget allocation
+         elif global_step == lora_config.total_step - lora_config.tfinal:
+             _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
+             # for some reason, this freezes the trainable parameters and nothing gets updated
+             # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
+             lora_config.rank_pattern = rank_pattern
+             self.rankallocator.reset_ipt()
+         # Currently using inefficient way to mask the unimportant weights using the rank pattern
+         # due to problem mentioned above
+         elif global_step > lora_config.total_step - lora_config.tfinal:
+             self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
+         # Pass the function and do forward propagation
+         else:
+             return None
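The `update_and_allocate` docstring above pins down where the budget update sits in the training loop. A hedged sketch that wires it into a generic loop (the model, dataloader, and optimizer are assumed to be constructed elsewhere; only the call order matters):

```python
# Sketch of the call order required by AdaLoraModel.update_and_allocate:
# backward() -> optimizer.step() -> update_and_allocate(step) -> zero_grad().
def train_adalora(peft_model, dataloader, optimizer, num_epochs: int) -> None:
    global_step = 0
    for _ in range(num_epochs):
        for batch in dataloader:
            loss = peft_model(**batch).loss   # forward() also adds the orthogonal regularizer
            loss.backward()
            optimizer.step()
            # Update importance scores and reallocate the rank budget for this step.
            peft_model.base_model.update_and_allocate(global_step)
            optimizer.zero_grad()
            global_step += 1
```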
llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (808 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc ADDED
Binary file (2.57 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc ADDED
Binary file (7.63 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/model.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/ia3/model.py ADDED
@@ -0,0 +1,394 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import re
17
+ import warnings
18
+ from dataclasses import asdict
19
+ from enum import Enum
20
+ from typing import Optional
21
+
22
+ import torch
23
+ from torch import nn
24
+ from transformers.pytorch_utils import Conv1D
25
+
26
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
27
+ from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
28
+ from peft.utils import (
29
+ TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
30
+ TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
31
+ ModulesToSaveWrapper,
32
+ _get_submodules,
33
+ )
34
+
35
+ from .layer import Conv2d, IA3Layer, Linear
36
+
37
+
38
+ class IA3Model(BaseTuner):
39
+ """
40
+ Creates a Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained
41
+ transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638
42
+
43
+ Args:
44
+ model ([`~transformers.PreTrainedModel`]): The model to be adapted.
45
+ config ([`IA3Config`]): The configuration of the (IA)^3 model.
46
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
47
+
48
+ Returns:
49
+ `torch.nn.Module`: The (IA)^3 model.
50
+
51
+ Example:
52
+
53
+ ```py
54
+ >>> from transformers import AutoModelForSeq2SeqLM, ia3Config
55
+ >>> from peft import IA3Model, IA3Config
56
+
57
+ >>> config = IA3Config(
58
+ ... peft_type="IA3",
59
+ ... task_type="SEQ_2_SEQ_LM",
60
+ ... target_modules=["k", "v", "w0"],
61
+ ... feedforward_modules=["w0"],
62
+ ... )
63
+
64
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
65
+ >>> ia3_model = IA3Model(config, model)
66
+ ```
67
+
68
+ **Attributes**:
69
+ - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
70
+ - **peft_config** ([`ia3Config`]): The configuration of the (IA)^3 model.
71
+ """
72
+
73
+ prefix: str = "ia3_"
74
+
75
+ def __init__(self, model, config, adapter_name):
76
+ super().__init__(model, config, adapter_name)
77
+
78
+ @staticmethod
79
+ def _create_new_module(ia3_config, adapter_name, target, **kwargs):
80
+ # avoid eager bnb import
81
+ if is_bnb_available():
82
+ import bitsandbytes as bnb
83
+
84
+ from .bnb import Linear8bitLt
85
+
86
+ if is_bnb_4bit_available():
87
+ from .bnb import Linear4bit
88
+
89
+ loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
90
+ loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
91
+ is_feedforward = kwargs.pop("is_feedforward", False)
92
+
93
+ if isinstance(target, BaseTunerLayer):
94
+ target_base_layer = target.get_base_layer()
95
+ else:
96
+ target_base_layer = target
97
+
98
+ if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
99
+ eightbit_kwargs = kwargs.copy()
100
+ eightbit_kwargs.update(
101
+ {
102
+ "has_fp16_weights": target_base_layer.state.has_fp16_weights,
103
+ "memory_efficient_backward": target_base_layer.state.memory_efficient_backward,
104
+ "threshold": target_base_layer.state.threshold,
105
+ "index": target_base_layer.index,
106
+ }
107
+ )
108
+ new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs)
109
+ elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit):
110
+ fourbit_kwargs = kwargs.copy()
111
+ fourbit_kwargs.update(
112
+ {
113
+ "compute_dtype": target_base_layer.compute_dtype,
114
+ "compress_statistics": target_base_layer.weight.compress_statistics,
115
+ "quant_type": target_base_layer.weight.quant_type,
116
+ }
117
+ )
118
+ new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs)
119
+ elif isinstance(target, torch.nn.Conv2d):
120
+ new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
121
+ elif isinstance(target_base_layer, torch.nn.Linear):
122
+ if kwargs["fan_in_fan_out"]:
123
+ warnings.warn(
124
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
125
+ "Setting fan_in_fan_out to False."
126
+ )
127
+ kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = False
128
+ new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
129
+ elif isinstance(target_base_layer, Conv1D):
130
+ if not kwargs["fan_in_fan_out"]:
131
+ warnings.warn(
132
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. "
133
+ "Setting fan_in_fan_out to True."
134
+ )
135
+ kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True
136
+ new_module = Linear(
137
+ target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs
138
+ )
139
+ else:
140
+ raise ValueError(
141
+ f"Target module {target} is not supported. "
142
+ f"Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported."
143
+ )
144
+ return new_module
145
+
146
+ @staticmethod
147
+ def _check_target_module_exists(ia3_config, key):
148
+ return check_target_module_exists(ia3_config, key)
149
+
150
+ def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
151
+ for n, p in model.named_parameters():
152
+ if self.prefix not in n:
153
+ p.requires_grad = False
154
+
155
+ def _create_and_replace(
156
+ self,
157
+ ia3_config,
158
+ adapter_name,
159
+ target,
160
+ target_name,
161
+ parent,
162
+ current_key,
163
+ ):
164
+ # check if target module is in feedforward_modules
165
+ is_feedforward = self._check_target_module_feedforward(ia3_config, current_key)
166
+
167
+ kwargs = {
168
+ "fan_in_fan_out": ia3_config.fan_in_fan_out,
169
+ "init_ia3_weights": ia3_config.init_ia3_weights,
170
+ "is_feedforward": is_feedforward,
171
+ "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
172
+ "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
173
+ }
174
+
175
+ if isinstance(target, IA3Layer):
176
+ target.update_layer(
177
+ adapter_name,
178
+ ia3_config.init_ia3_weights,
179
+ )
180
+ else:
181
+ new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs)
182
+ if adapter_name != self.active_adapter:
183
+ # adding an additional adapter: it is not automatically trainable
184
+ new_module.requires_grad_(False)
185
+ self._replace_module(parent, target_name, new_module, target)
186
+
187
+ @staticmethod
188
+ def _check_target_module_feedforward(ia3_config, key) -> bool:
189
+ """
190
+ A helper private method that checks if the target module `key` matches with a feedforward module specified in
191
+ `ia3_config`
192
+ """
193
+ if isinstance(ia3_config.feedforward_modules, str):
194
+ is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key))
195
+ else:
196
+ is_feedforward = any(key.endswith(target_key) for target_key in ia3_config.feedforward_modules)
197
+ return is_feedforward
198
+
199
+ def _replace_module(self, parent, child_name, new_module, child):
200
+ setattr(parent, child_name, new_module)
201
+
202
+ # child layer wraps the original module, unpack it
203
+ if hasattr(child, "base_layer"):
204
+ child = child.base_layer
205
+
206
+ # layers with base_layer don't need the weight to be copied, as they have a reference already
207
+ if not hasattr(new_module, "base_layer"):
208
+ new_module.weight = child.weight
209
+ if hasattr(child, "bias"):
210
+ new_module.bias = child.bias
211
+
212
+ if getattr(child, "state", None) is not None:
213
+ if hasattr(new_module, "base_layer"):
214
+ new_module.base_layer.state = child.state
215
+ else:
216
+ new_module.state = child.state
217
+ new_module.to(child.weight.device)
218
+
219
+ # dispatch to correct device
220
+ for name, module in new_module.named_modules():
221
+ if self.prefix in name:
222
+ module.to(child.weight.device)
223
+
224
+ def __getattr__(self, name: str):
225
+ """Forward missing attributes to the wrapped module."""
226
+ try:
227
+ return super().__getattr__(name) # defer to nn.Module's logic
228
+ except AttributeError:
229
+ return getattr(self.model, name)
230
+
231
+ def get_peft_config_as_dict(self, inference: bool = False):
232
+ config_dict = {}
233
+ for key, value in self.peft_config.items():
234
+ config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
235
+ if inference:
236
+ config["inference_mode"] = True
237
+ config_dict[key] = config
238
+ return config_dict
239
+
240
+ def _set_adapter_layers(self, enabled=True):
241
+ for module in self.model.modules():
242
+ if isinstance(module, (IA3Layer, ModulesToSaveWrapper)):
243
+ module.enable_adapters(enabled)
244
+
245
+ def enable_adapter_layers(self) -> None:
246
+ """Enable all adapters.
247
+
248
+ Call this if you have previously disabled all adapters and want to re-enable them.
249
+ """
250
+ self._set_adapter_layers(enabled=True)
251
+
252
+ def disable_adapter_layers(self) -> None:
253
+ """Disable all adapters.
254
+
255
+ When disabling all adapters, the model output corresponds to the output of the base model.
256
+ """
257
+ self._set_adapter_layers(enabled=False)
258
+
259
+ def set_adapter(self, adapter_name: str | list[str]) -> None:
260
+ """Set the active adapter(s).
261
+
262
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
263
+ not desired, use the following code.
264
+
265
+ ```py
266
+ >>> for name, param in model_peft.named_parameters():
267
+ ... if ...: # some check on name (e.g. if 'ia3' in name)
268
+ ... param.requires_grad = False
269
+ ```
270
+
271
+ Args:
272
+ adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
273
+ """
274
+ for module in self.model.modules():
275
+ if isinstance(module, IA3Layer):
276
+ if module.merged:
277
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
278
+ module.unmerge()
279
+ module.set_adapter(adapter_name)
280
+
281
+ def _prepare_adapter_config(self, peft_config, model_config):
282
+ if peft_config.target_modules is None:
283
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING:
284
+ raise ValueError("Please specify `target_modules` in `peft_config`")
285
+ peft_config.target_modules = TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[model_config["model_type"]]
286
+ if peft_config.feedforward_modules is None:
287
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING:
288
+ raise ValueError("Please specify `feedforward_modules` in `peft_config`")
289
+ peft_config.feedforward_modules = TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[
290
+ model_config["model_type"]
291
+ ]
292
+ return peft_config
293
+
294
+ def _unload_and_optionally_merge(
295
+ self, merge: bool = True, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
296
+ ):
297
+ r"""
298
+ This method merges the (IA)^3 layers into the base model. This is needed if someone wants to use the base model
299
+ as a standalone model.
300
+
301
+ Args:
302
+ safe_merge (`bool`, `optional`, defaults to `False`):
303
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
304
+ before merging the weights. This is useful if you want to check if the merge operation will produce
305
+ NaNs. Defaults to `False`.
306
+ adapter_names (`List[str]`, *optional*):
307
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
308
+ to `None`.
309
+ """
310
+ if getattr(self.model, "is_loaded_in_8bit", False):
311
+ raise ValueError("Cannot merge ia3 layers when the model is loaded in 8-bit mode")
312
+
313
+ if getattr(self.model, "is_loaded_in_4bit", False):
314
+ raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode")
315
+
316
+ self._unloading_checks(adapter_names)
317
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
318
+ for key in key_list:
319
+ try:
320
+ parent, target, target_name = _get_submodules(self.model, key)
321
+ except AttributeError:
322
+ continue
323
+
324
+ if hasattr(target, "base_layer"):
325
+ if merge:
326
+ target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
327
+ self._replace_module(parent, target_name, target.get_base_layer(), target)
328
+ elif isinstance(target, ModulesToSaveWrapper):
329
+ # save any additional trainable modules part of `modules_to_save`
330
+ new_module = target.modules_to_save[target.active_adapter]
331
+ if hasattr(new_module, "base_layer"):
332
+ # check if the module is itself a tuner layer
333
+ if merge:
334
+ new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
335
+ new_module = new_module.get_base_layer()
336
+ setattr(parent, target_name, new_module)
337
+
338
+ return self.model
339
+
340
+ def merge_and_unload(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> torch.nn.Module:
341
+ r"""
342
+ This method merges the IA³ layers into the base model. This is needed if someone wants to use the base model as
343
+ a standalone model.
344
+
345
+ Args:
346
+ safe_merge (`bool`):
347
+ whether to activate the safe-merge check for potential NaNs in the adapter
348
+ weights
349
+ adapter_names (`List[str]`, *optional*):
350
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
351
+ to `None`.
352
+
353
+ Example:
354
+
355
+ ```py
356
+ >>> from transformers import AutoModelForCausalLM
357
+ >>> from peft import PeftModel
358
+
359
+ >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
360
+ >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
361
+ >>> model = PeftModel.from_pretrained(base_model, peft_model_id)
362
+ >>> merged_model = model.merge_and_unload()
363
+ ```
364
+ """
365
+ return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names)
366
+
367
+ def unload(self) -> torch.nn.Module:
368
+ """
369
+ Gets back the base model by removing all the IA³ modules without merging. This gives back the original base
370
+ model.
371
+ """
372
+ return self._unload_and_optionally_merge(merge=False)
373
+
374
+ def delete_adapter(self, adapter_name: str) -> None:
375
+ """
376
+ Deletes an existing adapter.
377
+
378
+ Args:
379
+ adapter_name (str): Name of the adapter to be deleted.
380
+ """
381
+ if adapter_name not in self.peft_config:
382
+ raise ValueError(f"Adapter {adapter_name} does not exist")
383
+ del self.peft_config[adapter_name]
384
+
385
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
386
+ new_adapter = None
387
+ for key in key_list:
388
+ _, target, _ = _get_submodules(self.model, key)
389
+ if isinstance(target, IA3Layer):
390
+ target.delete_adapter(adapter_name)
391
+ if new_adapter is None:
392
+ new_adapter = target.active_adapters[:]
393
+
394
+ self.active_adapter = new_adapter or []
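
The (IA)^3 tuner above is normally driven through the public PEFT entry points rather than instantiated directly. The following is a minimal, illustrative sketch (not part of this commit) of attaching an (IA)^3 adapter and then folding it back into the base weights via `merge_and_unload`; the model name and module names are assumptions chosen for illustration.

```py
# Minimal sketch (not part of this commit): attach an (IA)^3 adapter through the
# public PEFT entry points, then fold it back into the base weights.
# The model name and module names below are illustrative assumptions.
from transformers import AutoModelForSequenceClassification
from peft import IA3Config, get_peft_model

base_model = AutoModelForSequenceClassification.from_pretrained("roberta-base")
ia3_config = IA3Config(
    target_modules=["key", "value", "output.dense"],
    feedforward_modules=["output.dense"],  # must be a subset of target_modules
)
peft_model = get_peft_model(base_model, ia3_config)
peft_model.print_trainable_parameters()

# ... training would happen here ...

# merge_and_unload() calls _unload_and_optionally_merge() from the model above and
# returns the plain base model with the learned (IA)^3 vectors folded in.
merged_model = peft_model.merge_and_unload()
```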
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import LoHaConfig
16
+ from .layer import Conv2d, Linear, LoHaLayer
17
+ from .model import LoHaModel
18
+
19
+
20
+ __all__ = ["LoHaConfig", "LoHaModel", "Conv2d", "Linear", "LoHaLayer"]
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (388 Bytes).
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc ADDED
Binary file (5.36 kB).
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.1 kB).
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc ADDED
Binary file (3.9 kB).
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/config.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import List, Optional, Union
17
+
18
+ from peft.tuners.lycoris_utils import LycorisConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class LoHaConfig(LycorisConfig):
24
+ """
25
+ This is the configuration class to store the configuration of a [`LoHaModel`].
26
+
27
+ Args:
28
+ r (`int`):
29
+ LoHa rank.
30
+ alpha (`int`):
31
+ The alpha parameter for LoHa scaling.
32
+ rank_dropout (`float`):
33
+ The dropout probability for rank dimension during training.
34
+ module_dropout (`float`):
35
+ The dropout probability for disabling LoHa modules during training.
36
+ use_effective_conv2d (`bool`):
37
+ Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
38
+ target_modules (`Optional[Union[List[str], str]]`):
39
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
40
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
41
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
42
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
43
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
44
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
45
+ the target modules manually.
46
+ init_weights (`bool`):
47
+ Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
48
+ discouraged.
49
+ layers_to_transform (`Union[List[int], int]`):
50
+ The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
51
+ that are specified in this list. If a single integer is passed, it will apply the transformations on the
52
+ layer at this index.
53
+ layers_pattern (`str`):
54
+ The layer pattern name, used only if `layers_to_transform` is different from `None`.
55
+ rank_pattern (`dict`):
56
+ The mapping from layer names or regexp expression to ranks which are different from the default rank
57
+ specified by `r`.
58
+ alpha_pattern (`dict`):
59
+ The mapping from layer names or regexp expression to alphas which are different from the default alpha
60
+ specified by `alpha`.
61
+ modules_to_save (`Optional[List[str]]`):
62
+ List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
63
+ """
64
+
65
+ r: int = field(default=8, metadata={"help": "LoHa rank"})
66
+ alpha: int = field(default=8, metadata={"help": "LoHa alpha"})
67
+ rank_dropout: float = field(
68
+ default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
69
+ )
70
+ module_dropout: float = field(
71
+ default=0.0, metadata={"help": "The dropout probability for disabling LoHa modules during training"}
72
+ )
73
+ use_effective_conv2d: bool = field(
74
+ default=False,
75
+ metadata={
76
+ "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
77
+ },
78
+ )
79
+ target_modules: Optional[Union[List[str], str]] = field(
80
+ default=None,
81
+ metadata={
82
+ "help": "List of module names or regex expression of the module names to replace with LoHa."
83
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
84
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
85
+ },
86
+ )
87
+ init_weights: bool = field(
88
+ default=True,
89
+ metadata={
90
+ "help": (
91
+ "Whether to initialize the weights of the LoHa layers with their default initialization. Don't change "
92
+ "this setting, except if you know exactly what you're doing."
93
+ ),
94
+ },
95
+ )
96
+ layers_to_transform: Optional[Union[List[int], int]] = field(
97
+ default=None,
98
+ metadata={
99
+ "help": "The layer indexes to transform. If this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
100
+ },
101
+ )
102
+ layers_pattern: Optional[str] = field(
103
+ default=None,
104
+ metadata={
105
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
106
+ },
107
+ )
108
+ modules_to_save: Optional[List[str]] = field(
109
+ default=None,
110
+ metadata={
111
+ "help": "List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. "
112
+ "For example, in Sequence Classification or Token Classification tasks, "
113
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
114
+ },
115
+ )
116
+
117
+ def __post_init__(self):
118
+ self.peft_type = PeftType.LOHA
119
+ self.target_modules = (
120
+ set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
121
+ )
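
For context, a `LoHaConfig` like the one defined above is typically passed to `get_peft_model`. The sketch below is illustrative only (the model name, target modules, and pattern keys are assumptions); it shows how `rank_pattern`/`alpha_pattern`, inherited from `LycorisConfig`, override the default `r` and `alpha` per module.

```py
# Illustrative sketch only: model name, target modules, and pattern keys are assumptions.
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, get_peft_model

config = LoHaConfig(
    r=8,
    alpha=16,
    target_modules=["q_proj", "v_proj"],
    rank_dropout=0.0,
    module_dropout=0.0,
    # Per-module overrides inherited from LycorisConfig; keys are resolved against
    # module paths in LoHaModel._create_and_replace (see model.py below).
    rank_pattern={"q_proj": 4},
    alpha_pattern={"q_proj": 8},
)

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```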
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/layer.py ADDED
@@ -0,0 +1,375 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Any, Set, Tuple
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from peft.tuners.lycoris_utils import LycorisLayer
23
+
24
+
25
+ class LoHaLayer(nn.Module, LycorisLayer):
26
+ # All names of layers that may contain adapter weights
27
+ adapter_layer_names = ("hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b", "hada_t1", "hada_t2")
28
+ # other_param_names is defined on parent class
29
+
30
+ def __init__(self, base_layer: nn.Module):
31
+ super().__init__()
32
+ LycorisLayer.__init__(self, base_layer)
33
+
34
+ # LoHa info
35
+ self.hada_w1_a = nn.ParameterDict({})
36
+ self.hada_w1_b = nn.ParameterDict({})
37
+ self.hada_w2_a = nn.ParameterDict({})
38
+ self.hada_w2_b = nn.ParameterDict({})
39
+ self.hada_t1 = nn.ParameterDict({})
40
+ self.hada_t2 = nn.ParameterDict({})
41
+
42
+ @property
43
+ def _available_adapters(self) -> Set[str]:
44
+ return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2}
45
+
46
+ def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]):
47
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L130C9-L143C75
48
+ if len(shape) == 4:
49
+ self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
50
+ self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode
51
+ self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode
52
+
53
+ self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
54
+ self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode
55
+ self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode
56
+ else:
57
+ self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
58
+ self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
59
+
60
+ self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
61
+ self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
62
+
63
+ def reset_adapter_parameters(self, adapter_name: str):
64
+ # Original implementation performs initialization with normal distribution
65
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
66
+
67
+ # FedPara paper proposes to perform He initialization, let's stick with it
68
+ # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization
69
+ if adapter_name in self.hada_w1_a.keys():
70
+ nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
71
+ nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
72
+ nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
73
+ nn.init.zeros_(self.hada_w2_b[adapter_name])
74
+ if adapter_name in self.hada_t1.keys():
75
+ nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
76
+ nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
77
+
78
+ def reset_adapter_parameters_random(self, adapter_name: str):
79
+ # Original implementation performs initialization with normal distribution
80
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
81
+
82
+ # FedPara paper proposes to perform He initialization, let's stick with it
83
+ # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization
84
+ if adapter_name in self.hada_w1_a.keys():
85
+ nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
86
+ nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
87
+ nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
88
+ nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5))
89
+ if adapter_name in self.hada_t1.keys():
90
+ nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
91
+ nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
92
+
93
+ def update_layer(
94
+ self,
95
+ adapter_name: str,
96
+ r: int,
97
+ alpha: float,
98
+ rank_dropout: float,
99
+ module_dropout: float,
100
+ init_weights: bool,
101
+ use_effective_conv2d: bool = False,
102
+ **kwargs,
103
+ ) -> None:
104
+ """Internal function to create loha adapter
105
+
106
+ Args:
107
+ adapter_name (`str`): Name for the adapter to add.
108
+ r (`int`): Rank for the added adapter.
109
+ alpha (`float`): Alpha for the added adapter.
110
+ rank_dropout (`float`): The dropout probability for rank dimension during training.
111
+ module_dropout (`float`): The dropout probability for disabling adapter during training.
112
+ init_weights (`bool`): Whether to initialize weights.
113
+ use_effective_conv2d (`bool`, *optional*, defaults to `False`):
114
+ Use parameter effective decomposition for Conv2d with ksize > 1.
115
+ """
116
+ if r <= 0:
117
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
118
+
119
+ self.r[adapter_name] = r
120
+ self.alpha[adapter_name] = alpha
121
+ self.scaling[adapter_name] = alpha / r
122
+ self.rank_dropout[adapter_name] = rank_dropout
123
+ self.module_dropout[adapter_name] = module_dropout
124
+
125
+ # Determine shape of LoHa weights
126
+ base_layer = self.get_base_layer()
127
+ if isinstance(base_layer, nn.Linear):
128
+ shape = tuple(base_layer.weight.shape)
129
+ elif isinstance(base_layer, nn.Conv2d):
130
+ use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
131
+ if use_effective_conv2d:
132
+ shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size)
133
+ else:
134
+ shape = (
135
+ base_layer.out_channels,
136
+ base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1],
137
+ )
138
+ else:
139
+ raise TypeError(f"LoHa is not implemented for base layers of type {type(base_layer).__name__}")
140
+
141
+ # Create weights with provided shape
142
+ self.create_adapter_parameters(adapter_name, r, shape)
143
+
144
+ # Initialize weights
145
+ if init_weights:
146
+ self.reset_adapter_parameters(adapter_name)
147
+ else:
148
+ self.reset_adapter_parameters_random(adapter_name)
149
+
150
+ # Move new weights to device
151
+ weight = getattr(self.get_base_layer(), "weight", None)
152
+ if weight is not None:
153
+ # the layer is already completely initialized, this is an update
154
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
155
+ self.to(weight.device, dtype=weight.dtype)
156
+ else:
157
+ self.to(weight.device)
158
+ self.set_adapter(self.active_adapters)
159
+
160
+ def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
161
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L178
162
+ if adapter_name in self.hada_t1.keys():
163
+ weight = make_weight_cp(
164
+ self.hada_t1[adapter_name],
165
+ self.hada_w1_a[adapter_name],
166
+ self.hada_w1_b[adapter_name],
167
+ self.hada_t2[adapter_name],
168
+ self.hada_w2_a[adapter_name],
169
+ self.hada_w2_b[adapter_name],
170
+ scale=torch.tensor(self.scaling[adapter_name]),
171
+ )
172
+ else:
173
+ weight = make_weight(
174
+ self.hada_w1_a[adapter_name],
175
+ self.hada_w1_b[adapter_name],
176
+ self.hada_w2_a[adapter_name],
177
+ self.hada_w2_b[adapter_name],
178
+ scale=torch.tensor(self.scaling[adapter_name]),
179
+ )
180
+
181
+ base_layer = self.get_base_layer()
182
+ weight = weight.reshape(base_layer.weight.shape)
183
+
184
+ # Perform rank dropout during training - drop rows of addition weights
185
+ rank_dropout = self.rank_dropout[adapter_name]
186
+ if self.training and rank_dropout:
187
+ drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype)
188
+ drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
189
+ # TODO: Investigate if there should be a scaler like in normal dropout during training
190
+ # Original implementation doesn't have it
191
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L193
192
+ drop /= drop.mean()
193
+ weight *= drop
194
+
195
+ return weight
196
+
197
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
198
+ previous_dtype = x.dtype
199
+
200
+ if self.disable_adapters:
201
+ if self.merged:
202
+ self.unmerge()
203
+ result = self.base_layer(x, *args, **kwargs)
204
+ elif self.merged:
205
+ result = self.base_layer(x, *args, **kwargs)
206
+ else:
207
+ result = self.base_layer(x, *args, **kwargs)
208
+
209
+ # Execute all the adapters
210
+ for active_adapter in self.active_adapters:
211
+ if active_adapter not in self._available_adapters:
212
+ continue
213
+
214
+ module_dropout = self.module_dropout[active_adapter]
215
+
216
+ # Modify current execution weights
217
+ if (not self.training) or (self.training and torch.rand(1) > module_dropout):
218
+ result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
219
+
220
+ result = result.to(previous_dtype)
221
+ return result
222
+
223
+
224
+ class Linear(LoHaLayer):
225
+ """LoHa implemented in Linear layer"""
226
+
227
+ def __init__(
228
+ self,
229
+ base_layer: nn.Module,
230
+ adapter_name: str = "default",
231
+ r: int = 0,
232
+ alpha: float = 0.0,
233
+ rank_dropout: float = 0.0,
234
+ module_dropout: float = 0.0,
235
+ init_weights: bool = True,
236
+ **kwargs,
237
+ ):
238
+ super().__init__(base_layer)
239
+
240
+ # Create adapter and set it active
241
+ self._active_adapter = adapter_name
242
+ self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
243
+
244
+ def _get_delta_activations(
245
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
246
+ ) -> torch.Tensor:
247
+ delta_weight = self.get_delta_weight(adapter_name)
248
+ # don't add bias here, because the bias is already included in the output of the base_layer
249
+ return F.linear(input, delta_weight)
250
+
251
+ def __repr__(self) -> str:
252
+ rep = super().__repr__()
253
+ return "loha." + rep
254
+
255
+
256
+ class Conv2d(LoHaLayer):
257
+ """LoHa implemented in Conv2d layer"""
258
+
259
+ def __init__(
260
+ self,
261
+ base_layer: nn.Module,
262
+ adapter_name: str = "default",
263
+ r: int = 0,
264
+ alpha: float = 0.0,
265
+ rank_dropout: float = 0.0,
266
+ module_dropout: float = 0.0,
267
+ use_effective_conv2d: bool = False,
268
+ init_weights: bool = True,
269
+ **kwargs,
270
+ ):
271
+ super().__init__(base_layer)
272
+
273
+ # Create adapter and set it active
274
+ self._active_adapter = adapter_name
275
+ self.update_layer(
276
+ adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
277
+ )
278
+
279
+ def _get_delta_activations(
280
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
281
+ ) -> torch.Tensor:
282
+ delta_weight = self.get_delta_weight(adapter_name)
283
+ # don't add bias here, because the bias is already included in the output of the base_layer
284
+ base_layer = self.get_base_layer()
285
+ return F.conv2d(
286
+ input,
287
+ delta_weight,
288
+ stride=base_layer.stride,
289
+ padding=base_layer.padding,
290
+ dilation=base_layer.dilation,
291
+ groups=base_layer.groups,
292
+ )
293
+
294
+ def __repr__(self) -> str:
295
+ rep = super().__repr__()
296
+ return "loha." + rep
297
+
298
+
299
+ # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L9
300
+
301
+
302
+ class HadaWeight(torch.autograd.Function):
303
+ @staticmethod
304
+ def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)):
305
+ ctx.save_for_backward(w1a, w1b, w2a, w2b, scale)
306
+ diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale
307
+ return diff_weight
308
+
309
+ @staticmethod
310
+ def backward(ctx, grad_out):
311
+ (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors
312
+ grad_out = grad_out * scale
313
+ temp = grad_out * (w2a @ w2b)
314
+ grad_w1a = temp @ w1b.T
315
+ grad_w1b = w1a.T @ temp
316
+
317
+ temp = grad_out * (w1a @ w1b)
318
+ grad_w2a = temp @ w2b.T
319
+ grad_w2b = w2a.T @ temp
320
+
321
+ del temp
322
+ return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None
323
+
324
+
325
+ class HadaWeightCP(torch.autograd.Function):
326
+ @staticmethod
327
+ def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)):
328
+ ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale)
329
+
330
+ rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a)
331
+ rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a)
332
+
333
+ return rebuild1 * rebuild2 * scale
334
+
335
+ @staticmethod
336
+ def backward(ctx, grad_out):
337
+ (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors
338
+ grad_out = grad_out * scale
339
+
340
+ temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b)
341
+ rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a)
342
+
343
+ grad_w = rebuild * grad_out
344
+ del rebuild
345
+
346
+ grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
347
+ grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T)
348
+ del grad_w, temp
349
+
350
+ grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp)
351
+ grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T)
352
+ del grad_temp
353
+
354
+ temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b)
355
+ rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a)
356
+
357
+ grad_w = rebuild * grad_out
358
+ del rebuild
359
+
360
+ grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
361
+ grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T)
362
+ del grad_w, temp
363
+
364
+ grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp)
365
+ grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T)
366
+ del grad_temp
367
+ return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None
368
+
369
+
370
+ def make_weight(w1a, w1b, w2a, w2b, scale):
371
+ return HadaWeight.apply(w1a, w1b, w2a, w2b, scale)
372
+
373
+
374
+ def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale):
375
+ return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
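
As a quick numeric check of the formula implemented by `HadaWeight.forward` above, the LoHa delta weight is the element-wise (Hadamard) product of two rank-`r` factorizations, scaled by `alpha / r`. The standalone sketch below reproduces that formula and the zero-initialization property used in `reset_adapter_parameters`.

```py
# Standalone numeric sketch of the delta weight computed by HadaWeight.forward:
#   delta_W = (w1a @ w1b) * (w2a @ w2b) * scale
# Shapes follow create_adapter_parameters for the non-Conv2d case.
import torch

out_dim, in_dim, r, scale = 6, 4, 2, 16 / 2  # scale = alpha / r
w1a = torch.randn(out_dim, r)
w1b = torch.randn(r, in_dim)
w2a = torch.randn(out_dim, r)
w2b = torch.randn(r, in_dim)

delta_w = (w1a @ w1b) * (w2a @ w2b) * scale  # element-wise (Hadamard) product
assert delta_w.shape == (out_dim, in_dim)

# With init_weights=True, reset_adapter_parameters zeroes hada_w2_b, so the initial
# delta weight is exactly zero and the freshly added adapter is a no-op:
w2b_zero = torch.zeros(r, in_dim)
assert torch.equal((w1a @ w1b) * (w2a @ w2b_zero) * scale, torch.zeros(out_dim, in_dim))
```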
llmeval-env/lib/python3.10/site-packages/peft/tuners/loha/model.py ADDED
@@ -0,0 +1,114 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ from itertools import chain
17
+ from typing import Dict, Type, Union
18
+
19
+ import torch
20
+ from torch import nn
21
+
22
+ from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
23
+
24
+ from .layer import Conv2d, Linear, LoHaLayer
25
+
26
+
27
+ class LoHaModel(LycorisTuner):
28
+ """
29
+ Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in
30
+ https://arxiv.org/abs/2108.06098. The current implementation heavily borrows from
31
+ https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py
32
+
33
+ Args:
34
+ model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
35
+ config ([`LoHaConfig`]): The configuration of the LoHa model.
36
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
37
+
38
+ Returns:
39
+ `torch.nn.Module`: The LoHa model.
40
+
41
+ Example:
42
+ ```py
43
+ >>> from diffusers import StableDiffusionPipeline
44
+ >>> from peft import LoHaModel, LoHaConfig
45
+
46
+ >>> config_te = LoHaConfig(
47
+ ... r=8,
48
+ ... alpha=32,
49
+ ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
50
+ ... rank_dropout=0.0,
51
+ ... module_dropout=0.0,
52
+ ... init_weights=True,
53
+ ... )
54
+ >>> config_unet = LoHaConfig(
55
+ ... r=8,
56
+ ... alpha=32,
57
+ ... target_modules=[
58
+ ... "proj_in",
59
+ ... "proj_out",
60
+ ... "to_k",
61
+ ... "to_q",
62
+ ... "to_v",
63
+ ... "to_out.0",
64
+ ... "ff.net.0.proj",
65
+ ... "ff.net.2",
66
+ ... ],
67
+ ... rank_dropout=0.0,
68
+ ... module_dropout=0.0,
69
+ ... init_weights=True,
70
+ ... use_effective_conv2d=True,
71
+ ... )
72
+
73
+ >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
74
+ >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default")
75
+ >>> model.unet = LoHaModel(model.unet, config_unet, "default")
76
+ ```
77
+
78
+ **Attributes**:
79
+ - **model** ([`~torch.nn.Module`]) -- The model to be adapted.
80
+ - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model.
81
+ """
82
+
83
+ prefix: str = "hada_"
84
+ layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = {
85
+ torch.nn.Conv2d: Conv2d,
86
+ torch.nn.Linear: Linear,
87
+ }
88
+
89
+ def _create_and_replace(
90
+ self,
91
+ config: LycorisConfig,
92
+ adapter_name: str,
93
+ target: Union[LoHaLayer, nn.Module],
94
+ target_name: str,
95
+ parent: nn.Module,
96
+ current_key: str,
97
+ ) -> None:
98
+ """
99
+ A private method to create and replace the target module with the adapter module.
100
+ """
101
+
102
+ # Regexp matching - Find key which matches current target_name in patterns provided
103
+ pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
104
+ target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name)
105
+
106
+ kwargs = config.to_dict()
107
+ kwargs["r"] = config.rank_pattern.get(target_name_key, config.r)
108
+ kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha)
109
+
110
+ if isinstance(target, LoHaLayer):
111
+ target.update_layer(adapter_name, **kwargs)
112
+ else:
113
+ new_module = self._create_new_module(config, adapter_name, target, **kwargs)
114
+ self._replace_module(parent, target_name, new_module, target)
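
The pattern resolution in `_create_and_replace` above picks the first `rank_pattern`/`alpha_pattern` key whose regex `(.*\.)?{key}$` matches the full module path, falling back to the bare target name. A small self-contained sketch of that rule (module names are hypothetical):

```py
# Self-contained sketch of the key-resolution rule in _create_and_replace above:
# a rank_pattern/alpha_pattern key applies when it matches the full module path,
# optionally preceded by a dotted prefix. Module names are hypothetical.
import re
from itertools import chain

rank_pattern = {"layers.0.self_attn.q_proj": 4}
alpha_pattern = {"q_proj": 16}
current_key = "model.layers.0.self_attn.q_proj"
target_name = "q_proj"

pattern_keys = list(chain(rank_pattern.keys(), alpha_pattern.keys()))
target_name_key = next(
    (key for key in pattern_keys if re.match(rf"(.*\.)?{key}$", current_key)),
    target_name,  # fallback when no pattern key matches
)
print(target_name_key)                        # "layers.0.self_attn.q_proj"
print(rank_pattern.get(target_name_key, 8))   # -> 4 (overrides the default r)
```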
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc ADDED
Binary file (14.6 kB).
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py ADDED
@@ -0,0 +1,100 @@
1
+ # Copyright 2024-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Optional
16
+
17
+ import torch
18
+
19
+ from peft.import_utils import is_aqlm_available
20
+ from peft.tuners.lora.layer import LoraLayer
21
+ from peft.tuners.tuners_utils import BaseTunerLayer
22
+
23
+
24
+ if is_aqlm_available():
25
+ from aqlm import QuantizedLinear
26
+
27
+
28
+ class AqlmLoraLinear(torch.nn.Module, LoraLayer):
29
+ def __init__(
30
+ self,
31
+ base_layer,
32
+ adapter_name: str,
33
+ r: int = 0,
34
+ lora_alpha: int = 1,
35
+ lora_dropout: float = 0.0,
36
+ init_lora_weights: bool = True,
37
+ use_rslora: bool = False,
38
+ **kwargs,
39
+ ):
40
+ super().__init__()
41
+ LoraLayer.__init__(self, base_layer)
42
+
43
+ self._active_adapter = adapter_name
44
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
45
+
46
+ def forward(self, x: torch.Tensor):
47
+ # note: logic differs from default Linear because merging is not supported
48
+ result = self.base_layer(x)
49
+
50
+ if self.disable_adapters:
51
+ return result
52
+
53
+ for active_adapter in self.active_adapters:
54
+ if active_adapter not in self.lora_A.keys():
55
+ continue
56
+ lora_A = self.lora_A[active_adapter]
57
+ lora_B = self.lora_B[active_adapter]
58
+ dropout = self.lora_dropout[active_adapter]
59
+ scaling = self.scaling[active_adapter]
60
+
61
+ requires_conversion = not torch.is_autocast_enabled()
62
+ if requires_conversion:
63
+ expected_dtype = result.dtype
64
+ x = x.to(lora_A.weight.dtype)
65
+
66
+ output = lora_B(lora_A(dropout(x)))
67
+ if requires_conversion:
68
+ output = output.to(expected_dtype)
69
+ output = output * scaling
70
+ result += output
71
+ return result
72
+
73
+ def __repr__(self) -> str:
74
+ rep = super().__repr__()
75
+ return "lora." + rep
76
+
77
+ # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
78
+ # def reset_lora_parameters(self, adapter_name):
79
+ # if adapter_name in self.lora_A.keys():
80
+ # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
81
+ # torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
82
+
83
+
84
+ def dispatch_aqlm(
85
+ target: torch.nn.Module,
86
+ adapter_name: str,
87
+ **kwargs: Any,
88
+ ) -> Optional[torch.nn.Module]:
89
+ new_module = None
90
+
91
+ if isinstance(target, BaseTunerLayer):
92
+ target_base_layer = target.get_base_layer()
93
+ else:
94
+ target_base_layer = target
95
+
96
+ if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
97
+ new_module = AqlmLoraLinear(target, adapter_name, **kwargs)
98
+ target.qweight = target_base_layer.codes
99
+
100
+ return new_module
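
`dispatch_aqlm` follows the dispatcher convention used by the LoRA tuner: each dispatcher inspects the target layer and returns a wrapped module or `None`, and the caller takes the first non-`None` result. The sketch below illustrates that convention with a hypothetical dispatcher list; it is not PEFT's exact internal logic.

```py
# Illustrative dispatcher chain (hypothetical, not PEFT's exact internals): each
# dispatcher returns a wrapped module or None; the first non-None result wins.
from typing import Callable, Optional

import torch


def dispatch_plain_linear(target: torch.nn.Module, adapter_name: str, **kwargs) -> Optional[torch.nn.Module]:
    # Stand-in for the default nn.Linear path; quantized dispatchers such as
    # dispatch_aqlm above would sit earlier in the list.
    return target if isinstance(target, torch.nn.Linear) else None


def create_new_module(dispatchers: list[Callable], target: torch.nn.Module, adapter_name: str, **kwargs):
    for dispatcher in dispatchers:
        new_module = dispatcher(target, adapter_name, **kwargs)
        if new_module is not None:
            return new_module
    raise ValueError(f"Target module {target} is not supported.")


layer = torch.nn.Linear(4, 4)
print(create_new_module([dispatch_plain_linear], layer, "default"))
```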
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/awq.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright 2024-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib.metadata as importlib_metadata
15
+ from typing import Any, Optional
16
+
17
+ import packaging.version
18
+ import torch
19
+
20
+ from peft.import_utils import is_auto_awq_available
21
+ from peft.tuners.lora.layer import LoraLayer
22
+ from peft.tuners.tuners_utils import BaseTunerLayer
23
+
24
+
25
+ if is_auto_awq_available():
26
+ from awq.modules.linear import WQLinear_GEMM
27
+
28
+
29
+ class AwqLoraLinear(torch.nn.Module, LoraLayer):
30
+ def __init__(
31
+ self,
32
+ base_layer,
33
+ adapter_name,
34
+ r: int = 0,
35
+ lora_alpha: int = 1,
36
+ lora_dropout: float = 0.0,
37
+ init_lora_weights: bool = True,
38
+ use_rslora: bool = False,
39
+ **kwargs,
40
+ ):
41
+ super().__init__()
42
+ LoraLayer.__init__(self, base_layer)
43
+
44
+ # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
45
+ # for backwards compatibility
46
+ self.quant_linear_module = base_layer
47
+
48
+ self._active_adapter = adapter_name
49
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
50
+
51
+ def forward(self, x: torch.Tensor):
52
+ result = self.quant_linear_module(x)
53
+
54
+ if self.disable_adapters:
55
+ return result
56
+
57
+ for active_adapter in self.active_adapters:
58
+ if active_adapter not in self.lora_A.keys():
59
+ continue
60
+ lora_A = self.lora_A[active_adapter]
61
+ lora_B = self.lora_B[active_adapter]
62
+ dropout = self.lora_dropout[active_adapter]
63
+ scaling = self.scaling[active_adapter]
64
+
65
+ requires_conversion = not torch.is_autocast_enabled()
66
+ if requires_conversion:
67
+ expected_dtype = result.dtype
68
+ x = x.to(lora_A.weight.dtype)
69
+
70
+ output = lora_B(lora_A(dropout(x)))
71
+ if requires_conversion:
72
+ output = output.to(expected_dtype)
73
+ output = output * scaling
74
+ result = result + output
75
+ return result
76
+
77
+ def __repr__(self) -> str:
78
+ rep = super().__repr__()
79
+ return "lora." + rep
80
+
81
+
82
+ def dispatch_awq(
83
+ target: torch.nn.Module,
84
+ adapter_name: str,
85
+ **kwargs: Any,
86
+ ) -> Optional[torch.nn.Module]:
87
+ new_module = None
88
+
89
+ if isinstance(target, BaseTunerLayer):
90
+ target_base_layer = target.get_base_layer()
91
+ else:
92
+ target_base_layer = target
93
+
94
+ if is_auto_awq_available() and isinstance(target_base_layer, WQLinear_GEMM):
95
+ # Raise the error only at the dispatch level
96
+ AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0")
97
+ version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq"))
98
+
99
+ if AUTOAWQ_MINIMUM_VERSION > version_autoawq:
100
+ raise ImportError(
101
+ f"Found an incompatible version of auto-awq. Found version {version_autoawq}, "
102
+ f"but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT."
103
+ )
104
+
105
+ new_module = AwqLoraLinear(target, adapter_name, **kwargs)
106
+ target.qweight = target_base_layer.qweight
107
+
108
+ return new_module
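
`dispatch_awq` additionally guards on a minimum `autoawq` version using `packaging` and `importlib.metadata`. The same guard pattern is shown below, applied to the `packaging` distribution itself purely so the sketch is runnable without the optional dependency installed.

```py
# Sketch of the version-guard pattern used in dispatch_awq, applied to the
# "packaging" distribution itself purely as a runnable example.
import importlib.metadata as importlib_metadata

import packaging.version

MINIMUM_VERSION = packaging.version.parse("20.0")
installed_version = packaging.version.parse(importlib_metadata.version("packaging"))

if MINIMUM_VERSION > installed_version:
    raise ImportError(
        f"Found an incompatible version of packaging. Found {installed_version}, "
        f"but at least {MINIMUM_VERSION} is required."
    )
print(f"packaging {installed_version} satisfies the minimum {MINIMUM_VERSION}")
```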
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/bnb.py ADDED
@@ -0,0 +1,508 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import warnings
17
+ from typing import Any, Optional
18
+
19
+ import bitsandbytes as bnb
20
+ import torch
21
+
22
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
23
+ from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
24
+ from peft.utils.integrations import dequantize_bnb_weight
25
+ from peft.utils.other import transpose
26
+
27
+ from .layer import LoraLayer
28
+
29
+
30
+ if is_bnb_available():
31
+
32
+ class Linear8bitLt(torch.nn.Module, LoraLayer):
33
+ # Lora implemented in a dense layer
34
+ def __init__(
35
+ self,
36
+ base_layer: torch.nn.Module,
37
+ adapter_name: str,
38
+ r: int = 0,
39
+ lora_alpha: int = 1,
40
+ lora_dropout: float = 0.0,
41
+ init_lora_weights: bool = True,
42
+ use_rslora: bool = False,
43
+ use_dora: bool = False,
44
+ **kwargs,
45
+ ) -> None:
46
+ super().__init__()
47
+ LoraLayer.__init__(self, base_layer)
48
+ self.fan_in_fan_out = False
49
+
50
+ self._active_adapter = adapter_name
51
+ self.update_layer(
52
+ adapter_name,
53
+ r,
54
+ lora_alpha=lora_alpha,
55
+ lora_dropout=lora_dropout,
56
+ init_lora_weights=init_lora_weights,
57
+ use_rslora=use_rslora,
58
+ use_dora=use_dora,
59
+ )
60
+
61
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
62
+ """
63
+ Merge the active adapter weights into the base weights
64
+
65
+ Args:
66
+ safe_merge (`bool`, *optional*):
67
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
68
+ before merging the weights. This is useful if you want to check if the merge operation will produce
69
+ NaNs. Defaults to `False`.
70
+ adapter_names (`list[str]`, *optional*):
71
+ The list of adapter names that should be merged. If None, all active adapters will be merged.
72
+ Defaults to `None`.
73
+ """
74
+ adapter_names = check_adapters_to_merge(self, adapter_names)
75
+ if not adapter_names:
76
+ # no adapter to merge
77
+ return
78
+
79
+ for active_adapter in adapter_names:
80
+ if active_adapter not in self.lora_A.keys():
81
+ continue
82
+
83
+ warnings.warn(
84
+ "Merge lora module to 8-bit linear may get different generations due to rounding errors."
85
+ )
86
+ lora_data = self.get_delta_weight(active_adapter)
87
+
88
+ weight = self.get_base_layer().weight
89
+ state = self.get_base_layer().state
90
+ if state.SCB is None:
91
+ state.SCB = weight.SCB
92
+
93
+ # Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8
94
+ # dequantization directly
95
+ output = dequantize_bnb_weight(weight, state=state)
96
+ if not self.use_dora[active_adapter]:
97
+ w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data
98
+ else:
99
+ # handle dora
100
+ # since output already includes scaling, set it to 1 here
101
+ weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach()
102
+ # We need to cache weight_norm because it has to be based on the original weights. We
103
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
104
+ # different value
105
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
106
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
107
+ w_data = dora_factor.view(-1, 1) * (output + lora_data)
108
+
109
+ if safe_merge and not torch.isfinite(w_data).all():
110
+ raise ValueError(
111
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
112
+ )
113
+
114
+ self.get_base_layer().weight = bnb.nn.Int8Params(
115
+ w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
116
+ ).to(weight.device)
117
+ state.reset_grads()
118
+ self.merged_adapters.append(active_adapter)
119
+
120
+ def unmerge(self) -> None:
121
+ """
122
+ This method unmerges all merged adapter layers from the base weights.
123
+ """
124
+ if not self.merged:
125
+ warnings.warn("Already unmerged. Nothing to do.")
126
+ return
127
+
128
+ while len(self.merged_adapters) > 0:
129
+ active_adapter = self.merged_adapters.pop()
130
+ if active_adapter not in self.lora_A.keys():
131
+ continue
132
+ warnings.warn(
133
+ "Unmerge lora module to 8-bit linear may get different generations due to rounding errors."
134
+ )
135
+ lora_data = self.get_delta_weight(active_adapter)
136
+
137
+ weight = self.get_base_layer().weight
138
+ state = self.get_base_layer().state
139
+ if state.SCB is None:
140
+ state.SCB = weight.SCB
141
+ output = dequantize_bnb_weight(weight, state=state)
142
+
143
+ if not self.use_dora[active_adapter]:
144
+ w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data
145
+ else:
146
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
147
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
148
+ w_data = output.data / dora_factor.view(-1, 1) - lora_data
149
+
150
+ self.get_base_layer().weight = bnb.nn.Int8Params(
151
+ w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
152
+ ).to(weight.device)
153
+ state.reset_grads()
154
+
155
+ def get_delta_weight(self, adapter):
156
+ return (
157
+ transpose(
158
+ self.lora_B[adapter].weight @ self.lora_A[adapter].weight,
159
+ False,
160
+ )
161
+ * self.scaling[adapter]
162
+ )
163
+
164
+ def _mixed_batch_forward(
165
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
166
+ ) -> torch.Tensor:
167
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
168
+ # extra argument that allows mixing different adapters in the same batch at inference time.
169
+ result = self.base_layer(x, *args, **kwargs)
170
+
171
+ unique_adapters = set(adapter_names)
172
+ sub_batch_indices_list = []
173
+ for adapter in unique_adapters:
174
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
175
+
176
+ for i, active_adapter in enumerate(unique_adapters):
177
+ if active_adapter == "__base__":
178
+ continue
179
+ if active_adapter not in self.lora_A.keys():
180
+ continue
181
+
182
+ lora_A = self.lora_A[active_adapter]
183
+ lora_B = self.lora_B[active_adapter]
184
+ dropout = self.lora_dropout[active_adapter]
185
+ scaling = self.scaling[active_adapter]
186
+
187
+ requires_conversion = not torch.is_autocast_enabled()
188
+ if requires_conversion:
189
+ expected_dtype = result.dtype
190
+ compute_dtype = lora_A.weight.dtype
191
+ if x.dtype != compute_dtype:
192
+ x = x.to(compute_dtype)
193
+
194
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
195
+ # layer output
196
+ sub_batch = x[sub_batch_indices_list[i]]
197
+ output = lora_B(lora_A(dropout(sub_batch))) * scaling
198
+ if requires_conversion:
199
+ output = output.to(expected_dtype)
200
+ result[sub_batch_indices_list[i]] += output
201
+
202
+ return result
203
+
204
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
205
+ self._check_forward_args(x, *args, **kwargs)
206
+ adapter_names = kwargs.pop("adapter_names", None)
207
+
208
+ if self.disable_adapters:
209
+ if self.merged:
210
+ self.unmerge()
211
+ result = self.base_layer(x, *args, **kwargs)
212
+ elif adapter_names is not None:
213
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
214
+ elif self.merged:
215
+ result = self.base_layer(x, *args, **kwargs)
216
+ else:
217
+ result = self.base_layer(x, *args, **kwargs)
218
+ for active_adapter in self.active_adapters:
219
+ if active_adapter not in self.lora_A.keys():
220
+ continue
221
+ lora_A = self.lora_A[active_adapter]
222
+ lora_B = self.lora_B[active_adapter]
223
+ dropout = self.lora_dropout[active_adapter]
224
+ scaling = self.scaling[active_adapter]
225
+
226
+ requires_conversion = not torch.is_autocast_enabled()
227
+ if requires_conversion:
228
+ expected_dtype = result.dtype
229
+ compute_dtype = lora_A.weight.dtype
230
+ if x.dtype != compute_dtype:
231
+ x = x.to(compute_dtype)
232
+
233
+ if not self.use_dora[active_adapter]:
234
+ output = lora_B(lora_A(dropout(x))) * scaling
235
+ else:
236
+ output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
237
+ if requires_conversion:
238
+ output = output.to(expected_dtype)
239
+
240
+ result = result + output
241
+
242
+ return result
243
+
244
+ def __repr__(self) -> str:
245
+ rep = super().__repr__()
246
+ return "lora." + rep
247
+
248
+ def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs):
249
+ new_module = None
250
+
251
+ if isinstance(target, BaseTunerLayer):
252
+ target_base_layer = target.get_base_layer()
253
+ else:
254
+ target_base_layer = target
255
+
256
+ loaded_in_8bit = kwargs.get("loaded_in_8bit", False)
257
+ if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
258
+ eightbit_kwargs = kwargs.copy()
259
+ eightbit_kwargs.update(
260
+ {
261
+ "has_fp16_weights": target.state.has_fp16_weights,
262
+ "memory_efficient_backward": target.state.memory_efficient_backward,
263
+ "threshold": target.state.threshold,
264
+ "index": target.index,
265
+ }
266
+ )
267
+ new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs)
268
+
269
+ return new_module
270
+
271
+
272
+ if is_bnb_4bit_available():
273
+
274
+ class Linear4bit(torch.nn.Module, LoraLayer):
275
+ # Lora implemented in a dense layer
276
+ def __init__(
277
+ self,
278
+ base_layer: torch.nn.Module,
279
+ adapter_name: str,
280
+ r: int = 0,
281
+ lora_alpha: int = 1,
282
+ lora_dropout: float = 0.0,
283
+ init_lora_weights: bool = True,
284
+ use_rslora: bool = False,
285
+ use_dora: bool = False,
286
+ **kwargs,
287
+ ) -> None:
288
+ super().__init__()
289
+ LoraLayer.__init__(self, base_layer)
290
+ self.fan_in_fan_out = False
291
+
292
+ self._active_adapter = adapter_name
293
+ self.update_layer(
294
+ adapter_name,
295
+ r,
296
+ lora_alpha=lora_alpha,
297
+ lora_dropout=lora_dropout,
298
+ init_lora_weights=init_lora_weights,
299
+ use_rslora=use_rslora,
300
+ use_dora=use_dora,
301
+ )
302
+
303
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
304
+ """
305
+ Merge the active adapter weights into the base weights
306
+
307
+ Args:
308
+ safe_merge (`bool`, *optional*):
309
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
310
+ before merging the weights. This is useful if you want to check if the merge operation will produce
311
+ NaNs. Defaults to `False`.
312
+ adapter_names (`list[str]`, *optional*):
313
+ The list of adapter names that should be merged. If None, all active adapters will be merged.
314
+ Defaults to `None`.
315
+ """
316
+ adapter_names = check_adapters_to_merge(self, adapter_names)
317
+ if not adapter_names:
318
+ # no adapter to merge
319
+ return
320
+
321
+ for active_adapter in adapter_names:
322
+ if active_adapter not in self.lora_A.keys():
323
+ continue
324
+
325
+ warnings.warn(
326
+ "Merge lora module to 4-bit linear may get different generations due to rounding errors."
327
+ )
328
+ # Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930
329
+ weight = self.get_base_layer().weight
330
+ kwargs = weight.__dict__
331
+ lora_data = self.get_delta_weight(active_adapter)
332
+
333
+ output = dequantize_bnb_weight(weight, state=weight.quant_state)
334
+ if not self.use_dora[active_adapter]:
335
+ w_data = output + lora_data
336
+ else:
337
+ # handle dora
338
+ # since output already includes scaling, set it to 1 here
339
+ weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach()
340
+ # We need to cache weight_norm because it has to be based on the original weights. We
341
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
342
+ # different value
343
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
344
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
345
+ w_data = dora_factor.view(-1, 1) * (output + lora_data)
346
+
347
+ if safe_merge and not torch.isfinite(w_data).all():
348
+ raise ValueError(
349
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
350
+ )
351
+ if "bnb_quantized" in kwargs:
352
+ kwargs["bnb_quantized"] = False
353
+ self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
354
+ weight.device
355
+ )
356
+ self.merged_adapters.append(active_adapter)
357
+
358
+ def unmerge(self) -> None:
359
+ """
360
+ This method unmerges all merged adapter layers from the base weights.
361
+ """
362
+ if not self.merged:
363
+ warnings.warn("Already unmerged. Nothing to do.")
364
+ return
365
+
366
+ while len(self.merged_adapters) > 0:
367
+ active_adapter = self.merged_adapters.pop()
368
+ if active_adapter not in self.lora_A.keys():
369
+ continue
370
+ warnings.warn(
371
+ "Unmerge lora module to 4-bit linear may get different generations due to rounding errors."
372
+ )
373
+
374
+ lora_data = self.get_delta_weight(active_adapter)
375
+ weight = self.get_base_layer().weight
376
+ kwargs = weight.__dict__
377
+ output = dequantize_bnb_weight(weight, state=weight.quant_state)
378
+
379
+ if not self.use_dora[active_adapter]:
380
+ w_data = output - lora_data
381
+ else:
382
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
383
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
384
+ w_data = output.data / dora_factor.view(-1, 1) - lora_data
385
+
386
+ if "bnb_quantized" in kwargs:
387
+ kwargs["bnb_quantized"] = False
388
+ self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
389
+ weight.device
390
+ )
391
+
392
+ def get_delta_weight(self, adapter):
393
+ return (
394
+ transpose(
395
+ self.lora_B[adapter].weight @ self.lora_A[adapter].weight,
396
+ False,
397
+ )
398
+ * self.scaling[adapter]
399
+ )
400
+
401
+ def _mixed_batch_forward(
402
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
403
+ ) -> torch.Tensor:
404
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
405
+ # extra argument that allows mixing different adapters in the same batch at inference time.
406
+ result = self.base_layer(x, *args, **kwargs)
407
+
408
+ unique_adapters = set(adapter_names)
409
+ sub_batch_indices_list = []
410
+ for adapter in unique_adapters:
411
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
412
+
413
+ for i, active_adapter in enumerate(unique_adapters):
414
+ if active_adapter == "__base__":
415
+ continue
416
+ if active_adapter not in self.lora_A.keys():
417
+ continue
418
+
419
+ lora_A = self.lora_A[active_adapter]
420
+ lora_B = self.lora_B[active_adapter]
421
+ dropout = self.lora_dropout[active_adapter]
422
+ scaling = self.scaling[active_adapter]
423
+
424
+ requires_conversion = not torch.is_autocast_enabled()
425
+ if requires_conversion:
426
+ expected_dtype = result.dtype
427
+ x = x.to(lora_A.weight.dtype)
428
+
429
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
430
+ # layer output
431
+ sub_batch = x[sub_batch_indices_list[i]]
432
+ output = lora_B(lora_A(dropout(sub_batch))) * scaling
433
+ if requires_conversion:
434
+ output = output.to(expected_dtype)
435
+ result[sub_batch_indices_list[i]] += output
436
+
437
+ return result
438
+
439
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
440
+ self._check_forward_args(x, *args, **kwargs)
441
+ adapter_names = kwargs.pop("adapter_names", None)
442
+
443
+ if self.disable_adapters:
444
+ if self.merged:
445
+ self.unmerge()
446
+ result = self.base_layer(x, *args, **kwargs)
447
+ elif adapter_names is not None:
448
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
449
+ elif self.merged:
450
+ result = self.base_layer(x, *args, **kwargs)
451
+ else:
452
+ result = self.base_layer(x, *args, **kwargs)
453
+ # As per Tim Dettmers, for 4bit, we need to defensively clone here.
454
+ # The reason is that in some cases, an error can occur that backprop
455
+ # does not work on a manipulated view. This issue may be solved with
456
+ # newer PyTorch versions but this would need extensive testing to be
457
+ # sure.
458
+ result = result.clone()
459
+
460
+ for active_adapter in self.active_adapters:
461
+ if active_adapter not in self.lora_A.keys():
462
+ continue
463
+ lora_A = self.lora_A[active_adapter]
464
+ lora_B = self.lora_B[active_adapter]
465
+ dropout = self.lora_dropout[active_adapter]
466
+ scaling = self.scaling[active_adapter]
467
+
468
+ requires_conversion = not torch.is_autocast_enabled()
469
+ if requires_conversion:
470
+ expected_dtype = result.dtype
471
+ x = x.to(lora_A.weight.dtype)
472
+
473
+ if not self.use_dora[active_adapter]:
474
+ output = lora_B(lora_A(dropout(x))) * scaling
475
+ else:
476
+ output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
477
+ if requires_conversion:
478
+ output = output.to(expected_dtype)
479
+
480
+ result = result + output
481
+
482
+ return result
483
+
484
+ def __repr__(self) -> str:
485
+ rep = super().__repr__()
486
+ return "lora." + rep
487
+
488
+ def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs):
489
+ new_module = None
490
+
491
+ if isinstance(target, BaseTunerLayer):
492
+ target_base_layer = target.get_base_layer()
493
+ else:
494
+ target_base_layer = target
495
+
496
+ loaded_in_4bit = kwargs.get("loaded_in_4bit", False)
497
+ if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
498
+ fourbit_kwargs = kwargs.copy()
499
+ fourbit_kwargs.update(
500
+ {
501
+ "compute_dtype": target_base_layer.compute_dtype,
502
+ "compress_statistics": target_base_layer.weight.compress_statistics,
503
+ "quant_type": target_base_layer.weight.quant_type,
504
+ }
505
+ )
506
+ new_module = Linear4bit(target, adapter_name, **fourbit_kwargs)
507
+
508
+ return new_module
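For context, a minimal sketch of how this dispatch path is exercised, assuming bitsandbytes, a CUDA device, and network access; the model id and target module names below are illustrative, not part of this file. Loading the base model in 4-bit makes `loaded_in_4bit` truthy in the kwargs above, so matching target layers are wrapped in `Linear4bit`:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=bnb_config)
lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base, lora_config)

# Each matched projection is now a lora.Linear4bit wrapping the original bnb.nn.Linear4bit.
for name, module in model.named_modules():
    if type(module).__name__ == "Linear4bit" and hasattr(module, "lora_A"):
        print(name)
        break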
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/config.py ADDED
@@ -0,0 +1,299 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Literal, Optional, Union
19
+
20
+ from peft.config import PeftConfig
21
+ from peft.utils import PeftType
22
+
23
+
24
+ @dataclass
25
+ class LoftQConfig:
26
+ """
27
+ This is the sub-configuration class to store the configuration of a [`LoraModel`].
28
+
29
+ Args:
30
+ bits_pattern (`dict`): The mapping from layer names or regexp expression to bits which are different from the
31
+ default bits specified by `bits`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 2`}.
32
+ bits (`int`): Quantization bits for LoftQ.
33
+ iter (`int`): Alternating iterations for LoftQ.
34
+ fake (`bool`): True: use fp16/fp32; used for first time to save weights. False: use bitsandbytes 4bit linear
35
+ models. weights can't be saved. Recommend to set to True, save the weights and load the saved weights in 4
36
+ bits.
37
+ """
38
+
39
+ loftq_bits: int = field(default=4, metadata={"help": "Quantization bits for LoftQ"})
40
+ loftq_iter: int = field(default=1, metadata={"help": "Alternating iterations for LoftQ"})
41
+
42
+
43
+ @dataclass
44
+ class LoraConfig(PeftConfig):
45
+ """
46
+ This is the configuration class to store the configuration of a [`LoraModel`].
47
+
48
+ Args:
49
+ r (`int`):
50
+ Lora attention dimension (the "rank").
51
+ target_modules (`Optional[Union[List[str], str]]`):
52
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
53
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
54
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
55
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
56
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
57
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
58
+ the target modules manually.
59
+ lora_alpha (`int`):
60
+ The alpha parameter for Lora scaling.
61
+ lora_dropout (`float`):
62
+ The dropout probability for Lora layers.
63
+ fan_in_fan_out (`bool`):
64
+ Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
65
+ `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
66
+ bias (`str`):
67
+ Bias type for LoRA. Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases
68
+ will be updated during training. Be aware that this means that, even when disabling the adapters, the model
69
+ will not produce the same output as the base model would have without adaptation.
70
+ use_rslora (`bool`):
71
+ When set to True, uses <a href='https://doi.org/10.48550/arXiv.2312.03732'>Rank-Stabilized LoRA</a> which
72
+ sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better.
73
+ Otherwise, it will use the original default value of `lora_alpha/r`.
74
+ modules_to_save (`List[str]`):
75
+ List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
76
+ init_lora_weights (`bool` | `Literal["gaussian", "loftq"]`):
77
+ How to initialize the weights of the adapter layers. Passing True (default) results in the default
78
+ initialization from the reference implementation from Microsoft. Passing 'gaussian' results in Gaussian
79
+ initialization scaled by the LoRA rank for linear and layers. Setting the initialization to False leads to
80
+ completely random initialization and is discouraged. Pass `'loftq'` to use LoftQ initialization.
81
+ layers_to_transform (`Union[List[int], int]`):
82
+ The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
83
+ that are specified in this list. If a single integer is passed, it will apply the transformations on the
84
+ layer at this index.
85
+ layers_pattern (`str`):
86
+ The layer pattern name, used only if `layers_to_transform` is different from `None`.
87
+ rank_pattern (`dict`):
88
+ The mapping from layer names or regexp expression to ranks which are different from the default rank
89
+ specified by `r`.
90
+ alpha_pattern (`dict`):
91
+ The mapping from layer names or regexp expression to alphas which are different from the default alpha
92
+ specified by `lora_alpha`.
93
+ megatron_config (`Optional[dict]`):
94
+ The TransformerConfig arguments for Megatron. It is used to create LoRA's parallel linear layer. You can
95
+ get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron.
96
+ The arguments will be used to initialize the TransformerConfig of Megatron. You need to specify this
97
+ parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron.
98
+ megatron_core (`Optional[str]`):
99
+ The core module from Megatron to use, defaults to `"megatron.core"`.
100
+ loftq_config (`Optional[LoftQConfig]`):
101
+ The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone weights
102
+ and initialize Lora layers. Also pass `init_lora_weights='loftq'`. Note that you should not pass a
103
+ quantized model in this case, as LoftQ will quantize the model itself.
104
+ use_dora (`bool`):
105
+ Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the weights
106
+ into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is
107
+ handled by a separate learnable parameter. This can improve the performance of LoRA especially at low
108
+ ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger overhead than pure
109
+ LoRA, so it is recommended to merge weights for inference. For more information, see
110
+ https://arxiv.org/abs/2402.09353.
111
+ layer_replication(`List[Tuple[int, int]]`):
112
+ Build a new stack of layers by stacking the original model layers according to the ranges specified. This
113
+ allows expanding (or shrinking) the model without duplicating the base model weights. The new layers will
114
+ all have separate LoRA adapters attached to them.
115
+ """
116
+
117
+ r: int = field(default=8, metadata={"help": "Lora attention dimension"})
118
+ target_modules: Optional[Union[list[str], str]] = field(
119
+ default=None,
120
+ metadata={
121
+ "help": (
122
+ "List of module names or regex expression of the module names to replace with LoRA."
123
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'."
124
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
125
+ "If not specified, modules will be chosen according to the model architecture, If the architecture is "
126
+ "not known, an error will be raised -- in this case, you should specify the target modules manually."
127
+ ),
128
+ },
129
+ )
130
+ lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"})
131
+ lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"})
132
+ fan_in_fan_out: bool = field(
133
+ default=False,
134
+ metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
135
+ )
136
+ bias: Literal["none", "all", "lora_only"] = field(
137
+ default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"}
138
+ )
139
+ use_rslora: bool = field(
140
+ default=False,
141
+ metadata={
142
+ "help": (
143
+ "When set to True, uses Rank-Stabilized LoRA doi.org/10.48550/arXiv.2312.03732"
144
+ " which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it"
145
+ " was proven to work better. Otherwise, it will use the original default"
146
+ " value of `lora_alpha/r`."
147
+ )
148
+ },
149
+ )
150
+ modules_to_save: Optional[list[str]] = field(
151
+ default=None,
152
+ metadata={
153
+ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. "
154
+ "For example, in Sequence Classification or Token Classification tasks, "
155
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
156
+ },
157
+ )
158
+ init_lora_weights: bool | Literal["gaussian", "loftq"] = field(
159
+ default=True,
160
+ metadata={
161
+ "help": (
162
+ "How to initialize the weights of the LoRA layers. Passing True (default) results in the default "
163
+ "initialization from the reference implementation from Microsoft. Passing 'gaussian' results "
164
+ "in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization "
165
+ "to False leads to completely random initialization and is discouraged."
166
+ "Pass `'loftq'` to use LoftQ initialization"
167
+ ),
168
+ },
169
+ )
170
+ layers_to_transform: Optional[Union[list[int], int]] = field(
171
+ default=None,
172
+ metadata={
173
+ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. "
174
+ "This only works when target_modules is a list of str."
175
+ },
176
+ )
177
+ layers_pattern: Optional[Union[list[str], str]] = field(
178
+ default=None,
179
+ metadata={
180
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
181
+ "This only works when target_modules is a list of str."
182
+ },
183
+ )
184
+ rank_pattern: Optional[dict] = field(
185
+ default_factory=dict,
186
+ metadata={
187
+ "help": (
188
+ "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. "
189
+ "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}"
190
+ )
191
+ },
192
+ )
193
+ alpha_pattern: Optional[dict] = field(
194
+ default_factory=dict,
195
+ metadata={
196
+ "help": (
197
+ "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. "
198
+ "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}"
199
+ )
200
+ },
201
+ )
202
+ megatron_config: Optional[dict] = field(
203
+ default=None,
204
+ metadata={
205
+ "help": (
206
+ "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer."
207
+ "You can get it like this, `core_transformer_config_from_args(get_args())`, "
208
+ "these two functions being from Megatron."
209
+ "You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and "
210
+ "RowParallelLinear layers of megatron."
211
+ "It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` "
212
+ "functions, because TransformerConfig may not necessarily be serialized."
213
+ "But when using megatron, we can use `get_peft_model_state_dict` function and "
214
+ "megatron's framework, they can also save and load models and configurations."
215
+ )
216
+ },
217
+ )
218
+ megatron_core: Optional[str] = field(
219
+ default="megatron.core",
220
+ metadata={
221
+ "help": (
222
+ "The core module from Megatron, it is used to create LoRA's parallel linear layer. "
223
+ "It only needs to be passed in when you need to use your own modified megatron core module. "
224
+ "Otherwise, it will use the default value `megatron.core`. "
225
+ )
226
+ },
227
+ )
228
+ # dict type is used when loading config.json
229
+ loftq_config: Union[LoftQConfig, dict] = field(
230
+ default_factory=dict,
231
+ metadata={
232
+ "help": (
233
+ "The configuration of LoftQ. If this is passed, then LoftQ will be used to quantize the backbone "
234
+ "weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case."
235
+ )
236
+ },
237
+ )
238
+ use_dora: bool = field(
239
+ default=False,
240
+ metadata={
241
+ "help": (
242
+ "Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the "
243
+ "weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the "
244
+ "magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, "
245
+ "especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger"
246
+ "overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, "
247
+ "see https://arxiv.org/abs/2402.09353."
248
+ )
249
+ },
250
+ )
251
+ # Enables replicating layers in a model to expand it to a larger model.
252
+ layer_replication: Optional[list[tuple[int, int]]] = field(
253
+ default=None,
254
+ metadata={
255
+ "help": (
256
+ "This enables using LoRA to effectively expand a transformer model to a larger size by repeating some layers. "
257
+ "The transformation handles models (currently Llama, Bert or Falcon compatible architectures) with "
258
+ "a module list in the model which it modifies to expand the number of modules. "
259
+ "Base weights are shared so the memory usage is close to the original model. The intended use is these base weights "
260
+ "remain fixed during finetuning but each layer has a separate LoRA adapter so the layers can be specialed via "
261
+ "the adapter layers fit during fine tuning."
262
+ "The format is a list of [start, end) pairs which specify the layer ranges to stack. For example:\n"
263
+ " Original model has 5 layers labelled by their position in the model: `[0, 1, 2, 3, 4]`\n"
264
+ " layer_replication: `[[0, 4], [2, 5]]`\n"
265
+ " Final model will have this arrangement of original layers: `[0, 1, 2, 3, 2, 3, 4]`\n"
266
+ "This format is based on what is used for pass-through merges in mergekit. It makes it simple to select sequential "
267
+ "ranges of a model and stack them while reusing layers at either end of each sequence."
268
+ )
269
+ },
270
+ )
271
+
272
+ def __post_init__(self):
273
+ self.peft_type = PeftType.LORA
274
+ self.target_modules = (
275
+ set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
276
+ )
277
+ # if target_modules is a regex expression, then layers_to_transform should be None
278
+ if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
279
+ raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
280
+
281
+ # if target_modules is a regex expression, then layers_pattern should be None
282
+ if isinstance(self.target_modules, str) and self.layers_pattern is not None:
283
+ raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
284
+
285
+ if self.use_dora and self.megatron_config:
286
+ raise ValueError("DoRA does not support megatron_core, please set `use_dora=False`.")
287
+
288
+ # handle init_lora_weights and loftq_config
289
+ if self.init_lora_weights == "loftq":
290
+ import importlib
291
+
292
+ if not importlib.util.find_spec("scipy"):
293
+ raise ImportError("The required package 'scipy' is not installed. Please install it to continue.")
294
+ if self.loftq_config is None:
295
+ raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.")
296
+
297
+ # convert loftq_config to dict
298
+ if self.loftq_config and not isinstance(self.loftq_config, dict):
299
+ self.loftq_config = vars(self.loftq_config)
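As a quick illustration of the scaling rule toggled by `use_rslora` (the values below are just arithmetic on the config fields; the target module names are illustrative):

import math
from peft import LoraConfig

cfg = LoraConfig(r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], use_rslora=True)
print(cfg.lora_alpha / cfg.r)             # 2.0 -> classic LoRA scaling (use_rslora=False)
print(cfg.lora_alpha / math.sqrt(cfg.r))  # 8.0 -> rank-stabilized scaling (use_rslora=True)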
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/layer.py ADDED
@@ -0,0 +1,1066 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import math
17
+ import warnings
18
+ from typing import Any, Optional, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ from transformers.pytorch_utils import Conv1D
24
+
25
+ from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
26
+ from peft.utils.integrations import dequantize_bnb_weight, gather_params_ctx
27
+ from peft.utils.other import transpose
28
+
29
+ from .config import LoraConfig
30
+
31
+
32
+ class LoraLayer(BaseTunerLayer):
33
+ # All names of layers that may contain (trainable) adapter weights
34
+ adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B")
35
+ # All names of other parameters that may contain adapter-related parameters
36
+ other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout")
37
+
38
+ def __init__(self, base_layer: nn.Module, **kwargs) -> None:
39
+ self.base_layer = base_layer
40
+ self.r = {}
41
+ self.lora_alpha = {}
42
+ self.scaling = {}
43
+ self.lora_dropout = nn.ModuleDict({})
44
+ self.lora_A = nn.ModuleDict({})
45
+ self.lora_B = nn.ModuleDict({})
46
+ # For Embedding layer
47
+ self.lora_embedding_A = nn.ParameterDict({})
48
+ self.lora_embedding_B = nn.ParameterDict({})
49
+ # Mark the weight as unmerged
50
+ self._disable_adapters = False
51
+ self.merged_adapters = []
52
+ self.use_dora: dict[str, bool] = {}
53
+ self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA
54
+ self._caches: dict[str, Any] = {}
55
+ self.kwargs = kwargs
56
+
57
+ base_layer = self.get_base_layer()
58
+ if isinstance(base_layer, nn.Linear):
59
+ in_features, out_features = base_layer.in_features, base_layer.out_features
60
+ elif isinstance(base_layer, nn.Conv2d):
61
+ in_features, out_features = base_layer.in_channels, base_layer.out_channels
62
+ elif isinstance(base_layer, nn.Embedding):
63
+ in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim
64
+ elif isinstance(base_layer, Conv1D):
65
+ in_features, out_features = (
66
+ base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
67
+ )
68
+ elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"):
69
+ # QuantLinear
70
+ in_features, out_features = base_layer.infeatures, base_layer.outfeatures
71
+ elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"):
72
+ # Megatron ColumnParallelLinear,RowParallelLinear
73
+ in_features, out_features = base_layer.input_size, base_layer.output_size
74
+ elif hasattr(base_layer, "codebooks") and base_layer.__class__.__name__ == "QuantizedLinear":
75
+ # AQLM QuantLinear
76
+ in_features, out_features = base_layer.in_features, base_layer.out_features
77
+ elif hasattr(base_layer, "w_bit") and base_layer.__class__.__name__ == "WQLinear_GEMM":
78
+ # Awq layers
79
+ in_features, out_features = base_layer.in_features, base_layer.out_features
80
+ else:
81
+ raise ValueError(f"Unsupported layer type {type(base_layer)}")
82
+
83
+ self.in_features = in_features
84
+ self.out_features = out_features
85
+
86
+ def update_layer(
87
+ self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool = False
88
+ ):
89
+ # This code works for linear layers, override for other layer types
90
+ if r <= 0:
91
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
92
+
93
+ self.r[adapter_name] = r
94
+ self.lora_alpha[adapter_name] = lora_alpha
95
+ if lora_dropout > 0.0:
96
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
97
+ else:
98
+ lora_dropout_layer = nn.Identity()
99
+
100
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
101
+ # Actual trainable parameters
102
+ self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False)
103
+ self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False)
104
+ if use_rslora:
105
+ self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
106
+ else:
107
+ self.scaling[adapter_name] = lora_alpha / r
108
+
109
+ if init_lora_weights == "loftq":
110
+ self.loftq_init(adapter_name)
111
+ elif init_lora_weights:
112
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
113
+
114
+ # check weight and qweight (for GPTQ)
115
+ for weight_name in ("weight", "qweight"):
116
+ weight = getattr(self.get_base_layer(), weight_name, None)
117
+ if weight is not None:
118
+ # the layer is already completely initialized, this is an update
119
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
120
+ self.to(weight.device, dtype=weight.dtype)
121
+ else:
122
+ self.to(weight.device)
123
+ break
124
+
125
+ if use_dora:
126
+ self.dora_init(adapter_name)
127
+ self.use_dora[adapter_name] = True
128
+ else:
129
+ self.use_dora[adapter_name] = False
130
+
131
+ self.set_adapter(self.active_adapters)
132
+
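A small sanity check of the initialization set up above (a sketch using this module's `Linear` wrapper, defined further down in this file; the adapter name and shapes are illustrative): because `lora_B` starts at zero, a freshly attached adapter leaves the wrapped layer's output unchanged until training updates it.

import torch
from torch import nn
from peft.tuners.lora.layer import Linear as LoraLinear

base = nn.Linear(16, 16)
layer = LoraLinear(base, adapter_name="default", r=8, lora_alpha=16)
x = torch.randn(4, 16)
# lora_B.weight is zero-initialized, so the LoRA contribution is exactly zero at this point.
print(torch.allclose(layer(x), base(x)))  # True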
133
+ def reset_lora_parameters(self, adapter_name, init_lora_weights):
134
+ if init_lora_weights is False:
135
+ return
136
+
137
+ if adapter_name in self.lora_A.keys():
138
+ if init_lora_weights is True:
139
+ # initialize A the same way as the default for nn.Linear and B to zero
140
+ # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124
141
+ nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
142
+ elif init_lora_weights.lower() == "gaussian":
143
+ nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name])
144
+ else:
145
+ raise ValueError(f"Unknown initialization {init_lora_weights=}")
146
+ nn.init.zeros_(self.lora_B[adapter_name].weight)
147
+ if adapter_name in self.lora_embedding_A.keys():
148
+ # initialize a the same way as the default for nn.linear and b to zero
149
+ nn.init.zeros_(self.lora_embedding_A[adapter_name])
150
+ nn.init.normal_(self.lora_embedding_B[adapter_name])
151
+
152
+ def loftq_init(self, adapter_name):
153
+ from peft.utils.loftq_utils import loftq_init
154
+
155
+ weight = self.get_base_layer().weight
156
+ kwargs = {
157
+ "num_bits": self.kwargs.get("loftq_bits", 4),
158
+ "reduced_rank": self.r[adapter_name],
159
+ "num_iter": self.kwargs.get("loftq_iter", 1),
160
+ }
161
+
162
+ qweight, lora_A, lora_B = loftq_init(weight, **kwargs)
163
+ if adapter_name in self.lora_A.keys():
164
+ # initialize A the same way as the default for nn.Linear and B to zero
165
+ self.lora_A[adapter_name].weight.data = lora_A
166
+ self.lora_B[adapter_name].weight.data = lora_B
167
+ if adapter_name in self.lora_embedding_A.keys():
168
+ # initialize a the same way as the default for nn.linear and b to zero
169
+ self.lora_embedding_A[adapter_name].weight.data = lora_A
170
+ self.lora_embedding_B[adapter_name].weight.data = lora_B
171
+ self.get_base_layer().weight.data = qweight
172
+
173
+ def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
174
+ # calculate L2 norm of weight matrix, column-wise
175
+ weight = weight + scaling * lora_weight
176
+ weight_norm = torch.linalg.norm(weight, dim=1).to(weight.dtype)
177
+ return weight_norm
178
+
179
+ def dora_init(self, adapter_name: str) -> None:
180
+ lora_A = self.lora_A[adapter_name]
181
+ lora_B = self.lora_B[adapter_name]
182
+ scaling = self.scaling[adapter_name]
183
+ with gather_params_ctx(self.get_base_layer()):
184
+ weight = self.get_base_layer().weight
185
+ quant_state = getattr(self.get_base_layer(), "state", None)
186
+ weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb
187
+ if weight.data.ndim == 4: # For handling LoRAs applied to Conv2Ds.
188
+ lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1))
189
+ lora_weight = lora_weight.reshape(weight.shape)
190
+ else:
191
+ lora_weight = lora_B.weight @ lora_A.weight
192
+ weight_norm = self._get_weight_norm(weight, lora_weight, scaling)
193
+ self.lora_magnitude_vector = nn.ParameterDict()
194
+ self.lora_magnitude_vector[adapter_name] = nn.Parameter(weight_norm, requires_grad=True)
195
+ # add lora_magnitude_vector to the list of learnable parameters
196
+ self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",)
197
+
198
+ def _cache_store(self, key: str, value: Any) -> None:
199
+ self._caches[key] = value
200
+
201
+ def _cache_pop(self, key: str) -> Any:
202
+ value = self._caches.pop(key)
203
+ return value
204
+
205
+ def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter):
206
+ """
207
+ For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer
208
+ output.
209
+ """
210
+ lora_weight = lora_B.weight @ lora_A.weight
211
+ magnitude = self.lora_magnitude_vector[active_adapter]
212
+ weight = self.get_base_layer().weight
213
+ quant_state = getattr(self.get_base_layer(), "state", None)
214
+ weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb
215
+ weight = weight.to(x.dtype)
216
+ weight_norm = self._get_weight_norm(weight, lora_weight, scaling)
217
+ # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353)
218
+ # "[...] we suggest treating ||V +∆V ||_c in
219
+ # Eq. (5) as a constant, thereby detaching it from the gradient
220
+ # graph. This means that while ||V + ∆V ||_c dynamically
221
+ # reflects the updates of ∆V , it won’t receive any gradient
222
+ # during backpropagation"
223
+ weight_norm = weight_norm.detach()
224
+ mag_norm_scale = (magnitude / weight_norm).view(1, -1)
225
+ result_dora = (mag_norm_scale - 1) * (
226
+ F.linear(x, transpose(weight, self.fan_in_fan_out))
227
+ ) + mag_norm_scale * lora_B(lora_A(x)) * scaling
228
+
229
+ # Note: Computation could potentially be accelerated by using the code below instead of calculating X@W again.
230
+ # This is only correct if dropout=0, otherwise results will differ:
231
+ # https://github.com/huggingface/peft/pull/1474#issuecomment-1964682771
232
+ # bias = self.get_base_layer().bias
233
+ # if bias is not None:
234
+ # result = result - bias
235
+ # result = mag_norm_scale * result + mag_norm_scale * lora_B(lora_A(x)) * scaling
236
+ # if bias is not None:
237
+ # result = result + bias
238
+
239
+ return result_dora
240
+
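A small numeric check of the decomposition computed above (illustrative shapes, dropout disabled, and the magnitude at its `dora_init` value of ||W||): adding `result_dora` to the base output reproduces the column-wise rescaling (m / ||W + scaling * B A||) applied to (W + scaling * B A) x.

import torch

torch.manual_seed(0)
x = torch.randn(2, 6)
W = torch.randn(4, 6)
A, B, scaling = torch.randn(3, 6), torch.randn(4, 3), 0.5
magnitude = torch.linalg.norm(W, dim=1)                        # dora_init value (lora_B == 0 there)
weight_norm = torch.linalg.norm(W + scaling * (B @ A), dim=1)  # detached in the code above
mag_norm_scale = (magnitude / weight_norm).view(1, -1)

base_out = x @ W.T
result_dora = (mag_norm_scale - 1) * base_out + mag_norm_scale * (x @ (B @ A).T) * scaling
expected = mag_norm_scale * (x @ (W + scaling * (B @ A)).T)
print(torch.allclose(base_out + result_dora, expected, atol=1e-6))  # True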
241
+ def set_scale(self, adapter, scale):
242
+ if adapter not in self.scaling:
243
+ # Ignore the case where the adapter is not in the layer
244
+ return
245
+ self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter]
246
+
247
+ def scale_layer(self, scale: float) -> None:
248
+ if scale == 1:
249
+ return
250
+
251
+ for active_adapter in self.active_adapters:
252
+ if active_adapter not in self.lora_A.keys():
253
+ continue
254
+
255
+ self.scaling[active_adapter] *= scale
256
+
257
+ def unscale_layer(self, scale=None) -> None:
258
+ for active_adapter in self.active_adapters:
259
+ if active_adapter not in self.lora_A.keys():
260
+ continue
261
+
262
+ if scale is None:
263
+ self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter]
264
+ else:
265
+ self.scaling[active_adapter] /= scale
266
+
267
+ def _check_forward_args(self, x, *args, **kwargs):
268
+ """Check if the arguments are compatible with the configs and state of the model"""
269
+ adapter_names = kwargs.get("adapter_names", None)
270
+ if adapter_names is None:
271
+ return
272
+
273
+ if len(x) != len(adapter_names):
274
+ msg = (
275
+ "Length of `adapter_names` should be the same as the number of inputs, but got "
276
+ f"{len(adapter_names)} and {len(x)} respectively."
277
+ )
278
+ raise ValueError(msg)
279
+
280
+ if self.merged:
281
+ # It is unclear what would be the right thing to do if users pass adapter_names and there are merged
282
+ # adapters. Therefore, it is better to raise an error in this case.
283
+ msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
284
+ raise ValueError(msg)
285
+
286
+ unique_adapters = set(self.active_adapters)
287
+ for adapter_name in unique_adapters:
288
+ if self.use_dora.get(adapter_name, False):
289
+ msg = "Cannot pass `adapter_names` when DoRA is enabled."
290
+ raise ValueError(msg)
291
+
292
+ def _mixed_batch_forward(
293
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
294
+ ) -> torch.Tensor:
295
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
296
+ # extra argument that allows mixing different adapters in the same batch at inference time.
297
+ result = self.base_layer(x, *args, **kwargs)
298
+ torch_result_dtype = result.dtype
299
+
300
+ unique_adapters = set(adapter_names)
301
+ sub_batch_indices_list = []
302
+ for adapter in unique_adapters:
303
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
304
+
305
+ for i, active_adapter in enumerate(unique_adapters):
306
+ if active_adapter == "__base__":
307
+ continue
308
+ if active_adapter not in self.lora_A.keys():
309
+ continue
310
+
311
+ lora_A = self.lora_A[active_adapter]
312
+ lora_B = self.lora_B[active_adapter]
313
+ dropout = self.lora_dropout[active_adapter]
314
+ scaling = self.scaling[active_adapter]
315
+
316
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
317
+ # layer output
318
+ sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype)
319
+ lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling
320
+ result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype)
321
+
322
+ return result
323
+
324
+
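For reference, this is the path taken when `adapter_names` is passed at inference time (a hedged sketch: `peft_model`, `tokenizer`, and the adapter names "default"/"other" are assumed to exist elsewhere). The sentinel "__base__" routes a sample through the base weights only.

inputs = tokenizer(
    ["routed to adapter 'default'", "routed to the base model", "routed to adapter 'other'"],
    return_tensors="pt",
    padding=True,
)
adapter_names = ["default", "__base__", "other"]  # one entry per sample in the batch
with torch.no_grad():
    outputs = peft_model(**inputs, adapter_names=adapter_names)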
325
+ # Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
326
+ # and modified to work with PyTorch FSDP
327
+
328
+
329
+ # ------------------------------------------------------------------------------------------
330
+ # Copyright (c) Microsoft Corporation. All rights reserved.
331
+ # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
332
+ # ------------------------------------------------------------------------------------------
333
+
334
+
335
+ class Linear(nn.Module, LoraLayer):
336
+ # Lora implemented in a dense layer
337
+ def __init__(
338
+ self,
339
+ base_layer,
340
+ adapter_name: str,
341
+ r: int = 0,
342
+ lora_alpha: int = 1,
343
+ lora_dropout: float = 0.0,
344
+ fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
345
+ is_target_conv_1d_layer: bool = False,
346
+ init_lora_weights: Union[bool, str] = True,
347
+ use_rslora: bool = False,
348
+ use_dora: bool = False,
349
+ **kwargs,
350
+ ) -> None:
351
+ super().__init__()
352
+ LoraLayer.__init__(self, base_layer, **kwargs)
353
+ self.fan_in_fan_out = fan_in_fan_out
354
+
355
+ self._active_adapter = adapter_name
356
+ self.update_layer(
357
+ adapter_name,
358
+ r,
359
+ lora_alpha=lora_alpha,
360
+ lora_dropout=lora_dropout,
361
+ init_lora_weights=init_lora_weights,
362
+ use_rslora=use_rslora,
363
+ use_dora=use_dora,
364
+ )
365
+ self.is_target_conv_1d_layer = is_target_conv_1d_layer
366
+
367
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
368
+ """
369
+ Merge the active adapter weights into the base weights
370
+
371
+ Args:
372
+ safe_merge (`bool`, *optional*):
373
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
374
+ before merging the weights. This is useful if you want to check if the merge operation will produce
375
+ NaNs. Defaults to `False`.
376
+ adapter_names (`list[str]`, *optional*):
377
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
378
+ to `None`.
379
+ """
380
+ adapter_names = check_adapters_to_merge(self, adapter_names)
381
+ if not adapter_names:
382
+ # no adapter to merge
383
+ return
384
+
385
+ for active_adapter in adapter_names:
386
+ if active_adapter in self.lora_A.keys():
387
+ base_layer = self.get_base_layer()
388
+ if safe_merge:
389
+ # Note that safe_merge will be slower than the normal merge
390
+ # because of the copy operation.
391
+ orig_weights = base_layer.weight.data.clone()
392
+ delta_weight = self.get_delta_weight(active_adapter)
393
+ if not self.use_dora[active_adapter]:
394
+ orig_weights = orig_weights + delta_weight
395
+ else:
396
+ # handle dora
397
+ # since delta_weight already includes scaling, set it to 1 here
398
+ weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach()
399
+ # We need to cache weight_norm because it has to be based on the original weights. We
400
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
401
+ # different value
402
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
403
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
404
+ orig_weights = dora_factor.view(-1, 1) * (orig_weights + delta_weight)
405
+
406
+ if not torch.isfinite(orig_weights).all():
407
+ raise ValueError(
408
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
409
+ )
410
+
411
+ base_layer.weight.data = orig_weights
412
+ else:
413
+ delta_weight = self.get_delta_weight(active_adapter)
414
+ if not self.use_dora[active_adapter]:
415
+ base_layer.weight.data = base_layer.weight.data + delta_weight
416
+ else:
417
+ # handle dora
418
+ # since delta_weight already includes scaling, set it to 1 here
419
+ weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach()
420
+ # We need to cache weight_norm because it has to be based on the original weights. We
421
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
422
+ # different value
423
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
424
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
425
+ new_weight = dora_factor.view(-1, 1) * (base_layer.weight.data + delta_weight)
426
+ base_layer.weight.data = new_weight
427
+
428
+ self.merged_adapters.append(active_adapter)
429
+
430
+ def unmerge(self) -> None:
431
+ """
432
+ This method unmerges all merged adapter layers from the base weights.
433
+ """
434
+ if not self.merged:
435
+ warnings.warn("Already unmerged. Nothing to do.")
436
+ return
437
+ while len(self.merged_adapters) > 0:
438
+ active_adapter = self.merged_adapters.pop()
439
+ if active_adapter in self.lora_A.keys():
440
+ weight = self.get_base_layer().weight
441
+ delta_weight = self.get_delta_weight(active_adapter)
442
+ if not self.use_dora[active_adapter]:
443
+ weight.data -= delta_weight
444
+ else:
445
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
446
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
447
+ weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight
448
+ weight.data = weight_orig
449
+
450
+ def get_delta_weight(self, adapter) -> torch.Tensor:
451
+ """
452
+ Compute the delta weight for the given adapter.
453
+
454
+ Args:
455
+ adapter (str):
456
+ The name of the adapter for which the delta weight should be computed.
457
+ """
458
+ device = self.lora_B[adapter].weight.device
459
+ dtype = self.lora_B[adapter].weight.dtype
460
+
461
+ # In case users want to merge the adapter weights that are in
462
+ # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
463
+ # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16.
464
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16
465
+
466
+ weight_A = self.lora_A[adapter].weight
467
+ weight_B = self.lora_B[adapter].weight
468
+
469
+ if cast_to_fp32:
470
+ weight_A = weight_A.float()
471
+ weight_B = weight_B.float()
472
+
473
+ output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter]
474
+
475
+ if cast_to_fp32:
476
+ output_tensor = output_tensor.to(dtype=dtype)
477
+
478
+ # cast back the weights
479
+ self.lora_A[adapter].weight.data = weight_A.to(dtype)
480
+ self.lora_B[adapter].weight.data = weight_B.to(dtype)
481
+
482
+ return output_tensor
483
+
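A quick check of the merge bookkeeping built on this delta (a sketch using the classes in this file; `init_lora_weights=False` is only there to make the delta non-zero): merging adds `scaling * B @ A` (transposed per `fan_in_fan_out`) to the base weight, and `unmerge` restores it up to floating-point error.

import torch
from torch import nn
from peft.tuners.lora.layer import Linear as LoraLinear

base = nn.Linear(8, 8)
w0 = base.weight.data.clone()
layer = LoraLinear(base, adapter_name="default", r=4, lora_alpha=8, init_lora_weights=False)
delta = layer.get_delta_weight("default")
layer.merge()
print(torch.allclose(layer.get_base_layer().weight.data, w0 + delta, atol=1e-6))  # True
layer.unmerge()
print(torch.allclose(layer.get_base_layer().weight.data, w0, atol=1e-5))          # True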
484
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
485
+ self._check_forward_args(x, *args, **kwargs)
486
+ adapter_names = kwargs.pop("adapter_names", None)
487
+
488
+ if self.disable_adapters:
489
+ if self.merged:
490
+ self.unmerge()
491
+ result = self.base_layer(x, *args, **kwargs)
492
+ elif adapter_names is not None:
493
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
494
+ elif self.merged:
495
+ result = self.base_layer(x, *args, **kwargs)
496
+ else:
497
+ result = self.base_layer(x, *args, **kwargs)
498
+ torch_result_dtype = result.dtype
499
+ for active_adapter in self.active_adapters:
500
+ if active_adapter not in self.lora_A.keys():
501
+ continue
502
+ lora_A = self.lora_A[active_adapter]
503
+ lora_B = self.lora_B[active_adapter]
504
+ dropout = self.lora_dropout[active_adapter]
505
+ scaling = self.scaling[active_adapter]
506
+ x = x.to(lora_A.weight.dtype)
507
+
508
+ if not self.use_dora[active_adapter]:
509
+ result = result + lora_B(lora_A(dropout(x))) * scaling
510
+ else:
511
+ x = dropout(x)
512
+ result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
513
+
514
+ result = result.to(torch_result_dtype)
515
+
516
+ return result
517
+
518
+ def __repr__(self) -> str:
519
+ rep = super().__repr__()
520
+ return "lora." + rep
521
+
522
+
523
+ class Embedding(nn.Module, LoraLayer):
524
+ # LoRA implemented in a Embedding layer
525
+ def __init__(
526
+ self,
527
+ base_layer: nn.Module,
528
+ adapter_name: str,
529
+ r: int = 0,
530
+ lora_alpha: int = 1,
531
+ lora_dropout: float = 0.0,
532
+ init_lora_weights: Union[bool, str] = True,
533
+ use_rslora: bool = False,
534
+ use_dora: bool = False,
535
+ **kwargs,
536
+ ) -> None:
537
+ super().__init__()
538
+ LoraLayer.__init__(self, base_layer)
539
+
540
+ if use_dora:
541
+ raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
542
+
543
+ self._active_adapter = adapter_name
544
+ self.update_layer(
545
+ adapter_name,
546
+ r,
547
+ lora_alpha=lora_alpha,
548
+ lora_dropout=lora_dropout,
549
+ init_lora_weights=init_lora_weights,
550
+ use_rslora=use_rslora,
551
+ use_dora=use_dora,
552
+ )
553
+
554
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
555
+ if r <= 0:
556
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
557
+
558
+ self.r[adapter_name] = r
559
+ self.lora_alpha[adapter_name] = lora_alpha
560
+ if lora_dropout > 0.0:
561
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
562
+ else:
563
+ lora_dropout_layer = nn.Identity()
564
+
565
+ self.lora_dropout[adapter_name] = lora_dropout_layer
566
+ # Actual trainable parameters
567
+ weight_A = torch.randn((r, self.in_features))
568
+ weight_B = torch.randn((self.out_features, r))
569
+ self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A)
570
+ self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B)
571
+ if use_rslora:
572
+ self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
573
+ else:
574
+ self.scaling[adapter_name] = lora_alpha / r
575
+
576
+ if init_lora_weights == "loftq":
577
+ self.loftq_init(adapter_name)
578
+ elif init_lora_weights:
579
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
580
+
581
+ base_layer = self.get_base_layer()
582
+ weight = getattr(base_layer, "weight", None)
583
+ if weight is not None:
584
+ # the layer is already completely initialized, this is an update
585
+ self.to(base_layer.weight.device, dtype=weight.dtype)
586
+
587
+ self.set_adapter(self.active_adapters)
588
+
589
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
590
+ """
591
+ Merge the active adapter weights into the base weights
592
+
593
+ Args:
594
+ safe_merge (`bool`, *optional*):
595
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
596
+ before merging the weights. This is useful if you want to check if the merge operation will produce
597
+ NaNs. Defaults to `False`.
598
+ adapter_names (`list[str]`, *optional*):
599
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
600
+ to `None`.
601
+ """
602
+ adapter_names = check_adapters_to_merge(self, adapter_names)
603
+ if not adapter_names:
604
+ # no adapter to merge
605
+ return
606
+
607
+ for active_adapter in adapter_names:
608
+ if active_adapter in self.lora_embedding_A.keys():
609
+ base_layer = self.get_base_layer()
610
+ if safe_merge:
611
+ # Note that safe_merge will be slower than the normal merge
612
+ # because of the copy operation.
613
+ orig_weights = base_layer.weight.data.clone()
614
+ orig_weights = orig_weights + self.get_delta_weight(active_adapter)
615
+
616
+ if not torch.isfinite(orig_weights).all():
617
+ raise ValueError(
618
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
619
+ )
620
+
621
+ base_layer.weight.data = orig_weights
622
+ else:
623
+ base_layer.weight.data = base_layer.weight.data + self.get_delta_weight(active_adapter)
624
+ self.merged_adapters.append(active_adapter)
625
+
626
+ def unmerge(self) -> None:
627
+ """
628
+ This method unmerges all merged adapter layers from the base weights.
629
+ """
630
+ if not self.merged:
631
+ warnings.warn("Already unmerged. Nothing to do.")
632
+ return
633
+ while len(self.merged_adapters) > 0:
634
+ active_adapter = self.merged_adapters.pop()
635
+ if active_adapter in self.lora_embedding_A.keys():
636
+ self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
637
+
638
+ def get_delta_weight(self, adapter) -> torch.Tensor:
639
+ """
640
+ Compute the delta weight for the given adapter.
641
+
642
+ Args:
643
+ adapter (str):
644
+ The name of the adapter for which the delta weight should be computed.
645
+ """
646
+ device = self.lora_embedding_B[adapter].device
647
+ dtype = self.lora_embedding_A[adapter].dtype
648
+
649
+ # In case users want to merge the adapter weights that are in
650
+ # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
651
+ # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16.
652
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16
653
+
654
+ weight_A = self.lora_embedding_A[adapter]
655
+ weight_B = self.lora_embedding_B[adapter]
656
+
657
+ if cast_to_fp32:
658
+ weight_A = weight_A.float()
659
+ weight_B = weight_B.float()
660
+
661
+ output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter]
662
+
663
+ if cast_to_fp32:
664
+ output_tensor = output_tensor.to(dtype=dtype)
665
+
666
+ # cast back the weights
667
+ self.lora_embedding_A[adapter] = weight_A.to(dtype)
668
+ self.lora_embedding_B[adapter] = weight_B.to(dtype)
669
+
670
+ return output_tensor
671
+
672
+ def _mixed_batch_forward(
673
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
674
+ ) -> torch.Tensor:
675
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
676
+ # extra argument that allows mixing different adapters in the same batch at inference time.
677
+ result = self.base_layer(x, *args, **kwargs)
678
+
679
+ unique_adapters = set(adapter_names)
680
+ sub_batch_indices_list = []
681
+ for adapter in unique_adapters:
682
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
683
+
684
+ for i, active_adapter in enumerate(unique_adapters):
685
+ if active_adapter == "__base__":
686
+ continue
687
+ if active_adapter not in self.lora_embedding_A.keys():
688
+ continue
689
+
690
+ embedding_A = self.lora_embedding_A[active_adapter].T
691
+ embedding_B = self.lora_embedding_B[active_adapter].T
692
+ scaling = self.scaling[active_adapter]
693
+
694
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
695
+ # layer output
696
+ sub_batch = x[sub_batch_indices_list[i]]
697
+ after_A = self._embed(sub_batch, embedding_A)
698
+ result[sub_batch_indices_list[i]] += (after_A @ embedding_B) * scaling
699
+
700
+ return result
701
+
702
+ def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
703
+ base_layer = self.get_base_layer()
704
+ return F.embedding(
705
+ input,
706
+ weight,
707
+ padding_idx=base_layer.padding_idx,
708
+ max_norm=base_layer.max_norm,
709
+ norm_type=base_layer.norm_type,
710
+ scale_grad_by_freq=base_layer.scale_grad_by_freq,
711
+ sparse=base_layer.sparse,
712
+ )
713
+
714
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
715
+ # TODO: no dtype conversion here, unlike in Linear, is that correct?
716
+ self._check_forward_args(x, *args, **kwargs)
717
+ adapter_names = kwargs.pop("adapter_names", None)
718
+
719
+ if self.disable_adapters:
720
+ if self.merged:
721
+ self.unmerge()
722
+ result = self.base_layer(x, *args, **kwargs)
723
+ elif adapter_names is not None:
724
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
725
+ elif self.merged:
726
+ result = self.base_layer(x, *args, **kwargs)
727
+ else:
728
+ result = self.base_layer(x, *args, **kwargs)
729
+ torch_result_dtype = result.dtype
730
+ for active_adapter in self.active_adapters:
731
+ if active_adapter not in self.lora_embedding_A:
732
+ continue
733
+ embedding_A = self.lora_embedding_A[active_adapter].T
734
+ embedding_B = self.lora_embedding_B[active_adapter].T
735
+ scaling = self.scaling[active_adapter]
736
+ after_A = self._embed(x, embedding_A)
737
+ result = result + (after_A @ embedding_B) * scaling
738
+ result = result.to(torch_result_dtype)
739
+
740
+ return result
741
+
742
+ def __repr__(self) -> str:
743
+ rep = super().__repr__()
744
+ return "lora." + rep
745
+
746
+
747
+ class Conv2d(nn.Module, LoraLayer):
748
+ # Lora implemented in a conv2d layer
749
+ def __init__(
750
+ self,
751
+ base_layer: nn.Module,
752
+ adapter_name: str,
753
+ r: int = 0,
754
+ lora_alpha: int = 1,
755
+ lora_dropout: float = 0.0,
756
+ init_lora_weights: Union[bool, str] = True,
757
+ use_rslora: bool = False,
758
+ use_dora: bool = False,
759
+ **kwargs,
760
+ ) -> None:
761
+ super().__init__()
762
+ LoraLayer.__init__(self, base_layer)
763
+
764
+ self._active_adapter = adapter_name
765
+ self.update_layer(
766
+ adapter_name,
767
+ r,
768
+ lora_alpha=lora_alpha,
769
+ lora_dropout=lora_dropout,
770
+ init_lora_weights=init_lora_weights,
771
+ use_rslora=use_rslora,
772
+ use_dora=use_dora,
773
+ )
774
+
775
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
776
+ if r <= 0:
777
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
778
+
779
+ self.r[adapter_name] = r
780
+ self.lora_alpha[adapter_name] = lora_alpha
781
+ if lora_dropout > 0.0:
782
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
783
+ else:
784
+ lora_dropout_layer = nn.Identity()
785
+
786
+ self.lora_dropout[adapter_name] = lora_dropout_layer
787
+ # Actual trainable parameters
788
+ base_layer = self.get_base_layer()
789
+ kernel_size = base_layer.kernel_size
790
+ stride = base_layer.stride
791
+ padding = base_layer.padding
792
+ self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False)
793
+ self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False)
794
+ if use_rslora:
795
+ self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
796
+ else:
797
+ self.scaling[adapter_name] = lora_alpha / r
798
+
799
+ if init_lora_weights == "loftq":
800
+ self.loftq_init(adapter_name)
801
+ elif init_lora_weights:
802
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
803
+
804
+ weight = getattr(base_layer, "weight", None)
805
+ if weight is not None:
806
+ # the layer is already completely initialized, this is an update
807
+ self.to(base_layer.weight.device, dtype=weight.dtype)
808
+
809
+ if use_dora:
810
+ self.dora_init(adapter_name)
811
+ self.use_dora[adapter_name] = True
812
+ else:
813
+ self.use_dora[adapter_name] = False
814
+
815
+ self.set_adapter(self.active_adapters)
816
+
817
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
818
+ """
819
+ Merge the active adapter weights inside the base weights
820
+
821
+ Args:
822
+ safe_merge (`bool`, *optional*):
823
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
824
+ before merging the weights. This is useful if you want to check if the merge operation will produce
825
+ NaNs. Defaults to `False`.
826
+ adapter_names (`list[str]`, *optional*):
827
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
828
+ to `None`.
829
+ """
830
+ adapter_names = check_adapters_to_merge(self, adapter_names)
831
+ if not adapter_names:
832
+ # no adapter to merge
833
+ return
834
+
835
+ for active_adapter in adapter_names:
836
+ if active_adapter in self.lora_A.keys():
837
+ base_layer = self.get_base_layer()
838
+ if safe_merge:
839
+ # Note that safe_merge will be slower than the normal merge
840
+ # because of the copy operation.
841
+ orig_weights = base_layer.weight.data.clone()
842
+ delta_weight = self.get_delta_weight(active_adapter)
843
+
844
+ if not self.use_dora[active_adapter]:
845
+ orig_weights = orig_weights + delta_weight
846
+ else:
847
+ # handle dora
848
+ # since delta_weight already includes scaling, set it to 1 here
849
+ weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach()
850
+ # We need to cache weight_norm because it has to be based on the original weights. We
851
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
852
+ # different value
853
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
854
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
855
+ orig_weights = dora_factor.view(-1, 1, 1, 1) * (orig_weights + delta_weight)
856
+
857
+ if not torch.isfinite(orig_weights).all():
858
+ raise ValueError(
859
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
860
+ )
861
+ base_layer.weight.data = orig_weights
862
+ else:
863
+ delta_weight = self.get_delta_weight(active_adapter)
864
+ if not self.use_dora[active_adapter]:
865
+ base_layer.weight.data = base_layer.weight.data + delta_weight
866
+ else:
867
+ # handle dora
868
+ # since delta_weight already includes scaling, set it to 1 here
869
+ weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach()
870
+ # We need to cache weight_norm because it has to be based on the original weights. We
871
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
872
+ # different value
873
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
874
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
875
+ new_weight = dora_factor.view(-1, 1, 1, 1) * (base_layer.weight.data + delta_weight)
876
+ base_layer.weight.data = new_weight
877
+
878
+ self.merged_adapters.append(active_adapter)
879
+
880
+ def unmerge(self) -> None:
881
+ """
882
+ This method unmerges all merged adapter layers from the base weights.
883
+ """
884
+ if not self.merged:
885
+ warnings.warn("Already unmerged. Nothing to do.")
886
+ return
887
+ while len(self.merged_adapters) > 0:
888
+ active_adapter = self.merged_adapters.pop()
889
+ if active_adapter in self.lora_A.keys():
890
+ weight = self.get_base_layer().weight
891
+ delta_weight = self.get_delta_weight(active_adapter)
892
+ if not self.use_dora[active_adapter]:
893
+ weight.data -= delta_weight
894
+ else:
895
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
896
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
897
+ weight_orig = weight.data / dora_factor.view(-1, 1, 1, 1) - delta_weight
898
+ weight.data = weight_orig
899
+
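+ # Inverse sketch: merging with DoRA stores W_merged = (m / ||W + dW||) * (W + dW), so unmerging divides by the
+ # cached dora_factor before subtracting delta_weight to recover the original base weight.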
900
+ def get_delta_weight(self, adapter) -> torch.Tensor:
901
+ """
902
+ Compute the delta weight for the given adapter.
903
+
904
+ Args:
905
+ adapter (str):
906
+ The name of the adapter for which the delta weight should be computed.
907
+ """
908
+ device = self.lora_B[adapter].weight.device
909
+ dtype = self.lora_A[adapter].weight.dtype
910
+
911
+ # In case users want to merge the adapter weights that are in
912
+ # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
913
+ # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16.
914
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16
915
+
916
+ weight_A = self.lora_A[adapter].weight
917
+ weight_B = self.lora_B[adapter].weight
918
+
919
+ if cast_to_fp32:
920
+ weight_A = weight_A.float()
921
+ weight_B = weight_B.float()
922
+
923
+ # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117
924
+ if self.get_base_layer().weight.size()[2:4] == (1, 1):
925
+ # conv2d 1x1
926
+ output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(
927
+ 3
928
+ ) * self.scaling[adapter]
929
+ else:
930
+ # conv2d 3x3
931
+ output_tensor = (
932
+ F.conv2d(
933
+ weight_A.permute(1, 0, 2, 3),
934
+ weight_B,
935
+ ).permute(1, 0, 2, 3)
936
+ * self.scaling[adapter]
937
+ )
938
+
939
+ if cast_to_fp32:
940
+ output_tensor = output_tensor.to(dtype=dtype)
941
+
942
+ # cast back the weights
943
+ self.lora_A[adapter].weight.data = weight_A.to(dtype)
944
+ self.lora_B[adapter].weight.data = weight_B.to(dtype)
945
+
946
+ return output_tensor
947
+
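+ # Shape note: lora_A.weight is (r, in, kH, kW) and lora_B.weight is (out, r, 1, 1). For a 1x1 base kernel the
+ # composition collapses to a matrix product of the squeezed kernels; otherwise F.conv2d composes the two kernels
+ # into a delta of shape (out, in, kH, kW), matching the base convolution weight.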
948
+ def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
949
+ # calculate L2 norm of weight matrix, channel-wise
950
+ weight = weight + scaling * lora_weight
951
+ # the following is needed to have compatibility with the 4D weight tensors of Conv2D
952
+ weight_norm = weight.norm(p=2, dim=(1, 2, 3), keepdim=True).transpose(1, 0)
953
+ return weight_norm
954
+
955
+ def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter):
956
+ """
957
+ For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer
958
+ output.
959
+ """
960
+ base_layer = self.get_base_layer()
961
+ weight = base_layer.weight
962
+ lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1))
963
+ lora_weight = lora_weight.reshape(weight.shape)
964
+ magnitude = self.lora_magnitude_vector[active_adapter]
965
+ weight_norm = self._get_weight_norm(weight, lora_weight, scaling)
966
+ # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353)
967
+ # "[...] we suggest treating ||V +∆V ||_c in
968
+ # Eq. (5) as a constant, thereby detaching it from the gradient
969
+ # graph. This means that while ||V + ∆V ||_c dynamically
970
+ # reflects the updates of ∆V , it won’t receive any gradient
971
+ # during backpropagation"
972
+ weight_norm = weight_norm.detach()
973
+ mag_norm_scale = magnitude / weight_norm
974
+ result_dora = (mag_norm_scale - 1) * (
975
+ F.conv2d(
976
+ x,
977
+ weight,
978
+ bias=None,
979
+ stride=base_layer.stride,
980
+ padding=base_layer.padding,
981
+ dilation=base_layer.dilation,
982
+ groups=base_layer.groups,
983
+ )
984
+ ) + mag_norm_scale * lora_B(lora_A(x)) * scaling
985
+
986
+ return result_dora
987
+
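+ # Equivalent view (sketch): adding result_dora to the base layer output yields
+ #   mag_norm_scale * (conv(x, W) + scaling * lora_B(lora_A(x))) + base_bias,
+ # i.e. the magnitude-rescaled DoRA output; the conv above uses bias=None so the base bias is added only once,
+ # unscaled, by the base layer itself.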
988
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
989
+ self._check_forward_args(x, *args, **kwargs)
990
+ adapter_names = kwargs.pop("adapter_names", None)
991
+
992
+ if self.disable_adapters:
993
+ if self.merged:
994
+ self.unmerge()
995
+ result = self.base_layer(x, *args, **kwargs)
996
+ elif adapter_names is not None:
997
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
998
+ elif self.merged:
999
+ result = self.base_layer(x, *args, **kwargs)
1000
+ else:
1001
+ result = self.base_layer(x, *args, **kwargs)
1002
+ torch_result_dtype = result.dtype
1003
+
1004
+ for active_adapter in self.active_adapters:
1005
+ if active_adapter not in self.lora_A.keys():
1006
+ continue
1007
+ lora_A = self.lora_A[active_adapter]
1008
+ lora_B = self.lora_B[active_adapter]
1009
+ dropout = self.lora_dropout[active_adapter]
1010
+ scaling = self.scaling[active_adapter]
1011
+ x = x.to(lora_A.weight.dtype)
1012
+
1013
+ if not self.use_dora[active_adapter]:
1014
+ result = result + lora_B(lora_A(dropout(x))) * scaling
1015
+ else:
1016
+ x = dropout(x)
1017
+ result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
1018
+
1019
+ result = result.to(torch_result_dtype)
1020
+ return result
1021
+
1022
+ def __repr__(self) -> str:
1023
+ rep = super().__repr__()
1024
+ return "lora." + rep
1025
+
1026
+
1027
+ def dispatch_default(
1028
+ target: torch.nn.Module,
1029
+ adapter_name: str,
1030
+ lora_config: LoraConfig,
1031
+ **kwargs,
1032
+ ) -> Optional[torch.nn.Module]:
1033
+ new_module = None
1034
+
1035
+ if isinstance(target, BaseTunerLayer):
1036
+ target_base_layer = target.get_base_layer()
1037
+ else:
1038
+ target_base_layer = target
1039
+
1040
+ if isinstance(target_base_layer, torch.nn.Embedding):
1041
+ embedding_kwargs = kwargs.copy()
1042
+ embedding_kwargs.pop("fan_in_fan_out", None)
1043
+ embedding_kwargs.update(lora_config.loftq_config)
1044
+ new_module = Embedding(target, adapter_name, **embedding_kwargs)
1045
+ elif isinstance(target_base_layer, torch.nn.Conv2d):
1046
+ kwargs.update(lora_config.loftq_config)
1047
+ new_module = Conv2d(target, adapter_name, **kwargs)
1048
+ elif isinstance(target_base_layer, torch.nn.Linear):
1049
+ if kwargs["fan_in_fan_out"]:
1050
+ warnings.warn(
1051
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
1052
+ "Setting fan_in_fan_out to False."
1053
+ )
1054
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
1055
+ kwargs.update(lora_config.loftq_config)
1056
+ new_module = Linear(target, adapter_name, **kwargs)
1057
+ elif isinstance(target_base_layer, Conv1D):
1058
+ if not kwargs["fan_in_fan_out"]:
1059
+ warnings.warn(
1060
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True."
1061
+ )
1062
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
1063
+ kwargs.update(lora_config.loftq_config)
1064
+ new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs)
1065
+
1066
+ return new_module
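+
+ # dispatch_default is tried last by LoraModel._create_new_module (see model.py below); returning None signals that
+ # the target layer type is unsupported so that the caller can raise an informative error.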
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/model.py ADDED
@@ -0,0 +1,793 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import math
17
+ import operator
18
+ import re
19
+ import warnings
20
+ from contextlib import contextmanager
21
+ from dataclasses import asdict, replace
22
+ from enum import Enum
23
+ from functools import partial, reduce
24
+ from itertools import chain
25
+ from typing import Literal, Optional
26
+
27
+ import torch
28
+ from torch import nn
29
+ from tqdm import tqdm
30
+
31
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
32
+ from peft.tuners.tuners_utils import (
33
+ BaseTuner,
34
+ BaseTunerLayer,
35
+ check_target_module_exists,
36
+ onload_layer,
37
+ replicate_layers,
38
+ )
39
+ from peft.utils import (
40
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
41
+ ModulesToSaveWrapper,
42
+ _freeze_adapter,
43
+ _get_submodules,
44
+ get_quantization_config,
45
+ )
46
+ from peft.utils.merge_utils import dare_linear, dare_ties, magnitude_prune, task_arithmetic, ties
47
+
48
+ from .aqlm import dispatch_aqlm
49
+ from .awq import dispatch_awq
50
+ from .config import LoraConfig
51
+ from .gptq import dispatch_gptq
52
+ from .layer import Conv2d, LoraLayer, dispatch_default
53
+ from .tp_layer import dispatch_megatron
54
+
55
+
56
+ def _adapter_names_pre_forward_hook(target, args, kwargs, adapter_names):
57
+ # pre-forward hook to inject the adapter_names argument when using mixed adapter batches inference
58
+ kwargs["adapter_names"] = adapter_names
59
+ return args, kwargs
60
+
61
+
62
+ class LoraModel(BaseTuner):
63
+ """
64
+ Creates Low Rank Adapter (LoRA) model from a pretrained transformers model.
65
+
66
+ The method is described in detail in https://arxiv.org/abs/2106.09685.
67
+
68
+ Args:
69
+ model ([`torch.nn.Module`]): The model to be adapted.
70
+ config ([`LoraConfig`]): The configuration of the Lora model.
71
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
72
+
73
+ Returns:
74
+ `torch.nn.Module`: The Lora model.
75
+
76
+ Example:
77
+
78
+ ```py
79
+ >>> from transformers import AutoModelForSeq2SeqLM
80
+ >>> from peft import LoraModel, LoraConfig
81
+
82
+ >>> config = LoraConfig(
83
+ ... task_type="SEQ_2_SEQ_LM",
84
+ ... r=8,
85
+ ... lora_alpha=32,
86
+ ... target_modules=["q", "v"],
87
+ ... lora_dropout=0.01,
88
+ ... )
89
+
90
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
91
+ >>> lora_model = LoraModel(model, config, "default")
92
+ ```
93
+
94
+ ```py
95
+ >>> import torch
96
+ >>> import transformers
97
+ >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
98
+
99
+ >>> rank = ...
100
+ >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"]
101
+ >>> config = LoraConfig(
102
+ ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM"
103
+ ... )
104
+ >>> quantization_config = transformers.BitsAndBytesConfig(load_in_8bit=True)
105
+
106
+ >>> tokenizer = transformers.AutoTokenizer.from_pretrained(
107
+ ... "kakaobrain/kogpt",
108
+ ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b
109
+ ... bos_token="[BOS]",
110
+ ... eos_token="[EOS]",
111
+ ... unk_token="[UNK]",
112
+ ... pad_token="[PAD]",
113
+ ... mask_token="[MASK]",
114
+ ... )
115
+ >>> model = transformers.GPTJForCausalLM.from_pretrained(
116
+ ... "kakaobrain/kogpt",
117
+ ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b
118
+ ... pad_token_id=tokenizer.eos_token_id,
119
+ ... use_cache=False,
120
+ ... device_map={"": rank},
121
+ ... torch_dtype=torch.float16,
122
+ ... quantization_config=quantization_config,
123
+ ... )
124
+ >>> model = prepare_model_for_kbit_training(model)
125
+ >>> lora_model = get_peft_model(model, config)
126
+ ```
127
+
128
+ **Attributes**:
129
+ - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
130
+ - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.
131
+ """
132
+
133
+ prefix: str = "lora_"
134
+
135
+ def __init__(self, model, config, adapter_name) -> None:
136
+ super().__init__(model, config, adapter_name)
137
+
138
+ def _check_new_adapter_config(self, config: LoraConfig) -> None:
139
+ """
140
+ A helper method to check the config when a new adapter is being added.
141
+
142
+ Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
143
+
144
+ """
145
+ # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check
146
+ # does not fully correspond to the error message.
147
+ if (len(self.peft_config) > 1) and (config.bias != "none"):
148
+ raise ValueError(
149
+ f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
150
+ "set bias to 'none' for all adapters."
151
+ )
152
+
153
+ @staticmethod
154
+ def _check_target_module_exists(lora_config, key):
155
+ return check_target_module_exists(lora_config, key)
156
+
157
+ def _prepare_model(self, peft_config: LoraConfig, model: nn.Module):
158
+ r"""
159
+ A private method to modify the model structure before adapter is applied.
160
+
161
+ Args:
162
+ peft_config (`PeftConfig`):
163
+ The prepared adapter config.
164
+ model (`nn.Module`):
165
+ The model that is going to be adapted.
166
+ """
167
+ if peft_config.layer_replication:
168
+ replicate_layers(model, peft_config.layer_replication)
169
+
170
+ def _create_and_replace(
171
+ self,
172
+ lora_config,
173
+ adapter_name,
174
+ target,
175
+ target_name,
176
+ parent,
177
+ current_key,
178
+ ):
179
+ if current_key is None:
180
+ raise ValueError("Current Key shouldn't be `None`")
181
+
182
+ # Regexp matching - Find key which matches current target_name in patterns provided
183
+ pattern_keys = list(chain(lora_config.rank_pattern.keys(), lora_config.alpha_pattern.keys()))
184
+ target_name_key = next(filter(lambda key: re.match(rf".*\.{key}$", current_key), pattern_keys), current_key)
185
+ r = lora_config.rank_pattern.get(target_name_key, lora_config.r)
186
+ alpha = lora_config.alpha_pattern.get(target_name_key, lora_config.lora_alpha)
187
+
188
+ kwargs = {
189
+ "r": r,
190
+ "lora_alpha": alpha,
191
+ "lora_dropout": lora_config.lora_dropout,
192
+ "fan_in_fan_out": lora_config.fan_in_fan_out,
193
+ "init_lora_weights": lora_config.init_lora_weights,
194
+ "use_rslora": lora_config.use_rslora,
195
+ "use_dora": lora_config.use_dora,
196
+ "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
197
+ "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
198
+ }
199
+
200
+ quant_methods = ["gptq", "aqlm", "awq"]
201
+ for quant_method in quant_methods:
202
+ quantization_config = get_quantization_config(self.model, method=quant_method)
203
+ if quantization_config is not None:
204
+ kwargs[f"{quant_method}_quantization_config"] = quantization_config
205
+
206
+ # note: AdaLoraLayer is a subclass of LoraLayer, we need to exclude it
207
+ from peft.tuners.adalora import AdaLoraLayer
208
+
209
+ if isinstance(target, LoraLayer) and not isinstance(target, AdaLoraLayer):
210
+ target.update_layer(
211
+ adapter_name,
212
+ r,
213
+ lora_alpha=alpha,
214
+ lora_dropout=lora_config.lora_dropout,
215
+ init_lora_weights=lora_config.init_lora_weights,
216
+ use_rslora=lora_config.use_rslora,
217
+ use_dora=lora_config.use_dora,
218
+ )
219
+ else:
220
+ new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
221
+ if adapter_name != self.active_adapter:
222
+ # adding an additional adapter: it is not automatically trainable
223
+ new_module.requires_grad_(False)
224
+ self._replace_module(parent, target_name, new_module, target)
225
+
226
+ def _replace_module(self, parent, child_name, new_module, child):
227
+ setattr(parent, child_name, new_module)
228
+ # It's not necessary to set requires_grad here, as that is handled by
229
+ # _mark_only_adapters_as_trainable
230
+
231
+ # child layer wraps the original module, unpack it
232
+ if hasattr(child, "base_layer"):
233
+ child = child.base_layer
234
+
235
+ if not hasattr(new_module, "base_layer"):
236
+ new_module.weight = child.weight
237
+ if hasattr(child, "bias"):
238
+ new_module.bias = child.bias
239
+
240
+ if getattr(child, "state", None) is not None:
241
+ if hasattr(new_module, "base_layer"):
242
+ new_module.base_layer.state = child.state
243
+ else:
244
+ new_module.state = child.state
245
+ new_module.to(child.weight.device)
246
+
247
+ # dispatch to correct device
248
+ for name, module in new_module.named_modules():
249
+ if (self.prefix in name) or ("ranknum" in name):
250
+ weight = child.qweight if hasattr(child, "qweight") else child.weight
251
+ module.to(weight.device)
252
+
253
+ def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
254
+ for n, p in model.named_parameters():
255
+ if self.prefix not in n:
256
+ p.requires_grad = False
257
+
258
+ for active_adapter in self.active_adapters:
259
+ bias = self.peft_config[active_adapter].bias
260
+ if bias == "none":
261
+ continue
262
+
263
+ if bias == "all":
264
+ for n, p in model.named_parameters():
265
+ if "bias" in n:
266
+ p.requires_grad = True
267
+ elif bias == "lora_only":
268
+ for m in model.modules():
269
+ if isinstance(m, LoraLayer) and hasattr(m, "bias") and m.bias is not None:
270
+ m.bias.requires_grad = True
271
+ else:
272
+ raise NotImplementedError(f"Requested bias: {bias}, is not implemented.")
273
+
274
+ @staticmethod
275
+ def _create_new_module(lora_config, adapter_name, target, **kwargs):
276
+ # Collect dispatcher functions to decide what backend to use for the replaced LoRA layer. The order matters,
277
+ # because the first match is always used. Therefore, the default layers should be checked last.
278
+ dispatchers = []
279
+
280
+ # avoid eager bnb import
281
+ if is_bnb_available():
282
+ from .bnb import dispatch_bnb_8bit
283
+
284
+ dispatchers.append(dispatch_bnb_8bit)
285
+
286
+ if is_bnb_4bit_available():
287
+ from .bnb import dispatch_bnb_4bit
288
+
289
+ dispatchers.append(dispatch_bnb_4bit)
290
+
291
+ dispatchers.extend([dispatch_aqlm, dispatch_awq, dispatch_gptq, dispatch_megatron, dispatch_default])
292
+
293
+ new_module = None
294
+ for dispatcher in dispatchers:
295
+ new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs)
296
+ if new_module is not None: # first match wins
297
+ break
298
+
299
+ if new_module is None:
300
+ # no module could be matched
301
+ raise ValueError(
302
+ f"Target module {target} is not supported. Currently, only the following modules are supported: "
303
+ "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `transformers.pytorch_utils.Conv1D`."
304
+ )
305
+
306
+ return new_module
307
+
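+ # Ordering sketch (illustrative): for a bitsandbytes 8-bit base layer, dispatch_bnb_8bit matches first and wins,
+ # while a plain nn.Linear falls through all quantized dispatchers and is handled by dispatch_default at the end.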
308
+ def __getattr__(self, name: str):
309
+ """Forward missing attributes to the wrapped module."""
310
+ try:
311
+ return super().__getattr__(name) # defer to nn.Module's logic
312
+ except AttributeError:
313
+ return getattr(self.model, name)
314
+
315
+ def get_peft_config_as_dict(self, inference: bool = False):
316
+ config_dict = {}
317
+ for key, value in self.peft_config.items():
318
+ config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
319
+ if inference:
320
+ config["inference_mode"] = True
321
+ config_dict[key] = config
322
+ return config_dict
323
+
324
+ def _set_adapter_layers(self, enabled: bool = True) -> None:
325
+ for module in self.model.modules():
326
+ if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
327
+ module.enable_adapters(enabled)
328
+
329
+ def enable_adapter_layers(self) -> None:
330
+ """Enable all adapters.
331
+
332
+ Call this if you have previously disabled all adapters and want to re-enable them.
333
+ """
334
+ self._set_adapter_layers(enabled=True)
335
+
336
+ def disable_adapter_layers(self) -> None:
337
+ """Disable all adapters.
338
+
339
+ When disabling all adapters, the model output corresponds to the output of the base model.
340
+ """
341
+ for active_adapter in self.active_adapters:
342
+ val = self.peft_config[active_adapter].bias
343
+ if val != "none":
344
+ msg = (
345
+ f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same "
346
+ "output as the the base model would without adaption."
347
+ )
348
+ warnings.warn(msg)
349
+ self._set_adapter_layers(enabled=False)
350
+
351
+ def set_adapter(self, adapter_name: str | list[str]) -> None:
352
+ """Set the active adapter(s).
353
+
354
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
355
+ not desired, use the following code.
356
+
357
+ ```py
358
+ >>> for name, param in model_peft.named_parameters():
359
+ ... if ...: # some check on name (ex. if 'lora' in name)
360
+ ... param.requires_grad = False
361
+ ```
362
+
363
+ Args:
364
+ adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
365
+ """
366
+ for module in self.model.modules():
367
+ if isinstance(module, LoraLayer):
368
+ if module.merged:
369
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
370
+ module.unmerge()
371
+ module.set_adapter(adapter_name)
372
+ self.active_adapter = adapter_name
373
+
374
+ @contextmanager
375
+ def _enable_peft_forward_hooks(self, *args, **kwargs):
376
+ # If adapter_names is passed as an argument, we inject it into the forward arguments.
377
+ adapter_names = kwargs.pop("adapter_names", None)
378
+ if adapter_names is None:
379
+ # nothing to do
380
+ yield
381
+ return
382
+
383
+ if self.training:
384
+ raise ValueError("Cannot pass `adapter_names` when the model is in training mode.")
385
+
386
+ hook_handles = []
387
+ for module in self.modules():
388
+ if isinstance(module, LoraLayer):
389
+ pre_forward = partial(_adapter_names_pre_forward_hook, adapter_names=adapter_names)
390
+ handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
391
+ hook_handles.append(handle)
392
+
393
+ yield
394
+
395
+ for handle in hook_handles:
396
+ handle.remove()
397
+
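+ # Call-path sketch (assuming the usual PeftModel entry points): passing adapter_names to forward/generate enters
+ # this context manager, which registers the pre-forward hook above on every LoraLayer so each layer receives the
+ # per-sample adapter assignment without changing its signature; the hooks are removed once the call returns.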
398
+ def _check_merge_allowed(self):
399
+ """Verify that the configuration supports merging.
400
+
401
+ Currently gptq quantization and replicated layers do not support merging.
402
+ """
403
+ if getattr(self.model, "quantization_method", None) == "gptq":
404
+ raise ValueError("Cannot merge LORA layers when the model is gptq quantized")
405
+ if self.peft_config.get("layer_replication"):
406
+ raise ValueError("Cannot merge LORA layers when base model layers are replicated")
407
+
408
+ @staticmethod
409
+ def _prepare_adapter_config(peft_config, model_config):
410
+ if peft_config.target_modules is None:
411
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
412
+ raise ValueError("Please specify `target_modules` in `peft_config`")
413
+ peft_config.target_modules = set(
414
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
415
+ )
416
+ return peft_config
417
+
418
+ def _unload_and_optionally_merge(
419
+ self,
420
+ merge=True,
421
+ progressbar: bool = False,
422
+ safe_merge: bool = False,
423
+ adapter_names: Optional[list[str]] = None,
424
+ ):
425
+ if merge:
426
+ self._check_merge_allowed()
427
+
428
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
429
+ desc = "Unloading " + ("and merging " if merge else "") + "model"
430
+ for key in tqdm(key_list, disable=not progressbar, desc=desc):
431
+ try:
432
+ parent, target, target_name = _get_submodules(self.model, key)
433
+ except AttributeError:
434
+ continue
435
+ with onload_layer(target):
436
+ if hasattr(target, "base_layer"):
437
+ if merge:
438
+ target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
439
+ self._replace_module(parent, target_name, target.get_base_layer(), target)
440
+ elif isinstance(target, ModulesToSaveWrapper):
441
+ # save any additional trainable modules part of `modules_to_save`
442
+ new_module = target.modules_to_save[target.active_adapter]
443
+ if hasattr(new_module, "base_layer"):
444
+ # check if the module is itself a tuner layer
445
+ if merge:
446
+ new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
447
+ new_module = new_module.get_base_layer()
448
+ setattr(parent, target_name, new_module)
449
+
450
+ return self.model
451
+
452
+ def add_weighted_adapter(
453
+ self,
454
+ adapters,
455
+ weights,
456
+ adapter_name,
457
+ combination_type="svd",
458
+ svd_rank=None,
459
+ svd_clamp=None,
460
+ svd_full_matrices=True,
461
+ svd_driver=None,
462
+ density=None,
463
+ majority_sign_method: Literal["total", "frequency"] = "total",
464
+ ) -> None:
465
+ """
466
+ This method adds a new adapter by merging the given adapters with the given weights.
467
+
468
+ When using the `cat` combination_type, be aware that the rank of the resulting adapter will be equal to the sum of
+ all adapters' ranks, so the mixed adapter may become too big and result in OOM errors.
471
+
472
+ Args:
473
+ adapters (`list`):
474
+ List of adapter names to be merged.
475
+ weights (`list`):
476
+ List of weights for each adapter.
477
+ adapter_name (`str`):
478
+ Name of the new adapter.
479
+ combination_type (`str`):
480
+ The merging type can be one of [`svd`, `linear`, `cat`, `ties`, `ties_svd`, `dare_ties`, `dare_linear`,
481
+ `dare_ties_svd`, `dare_linear_svd`, `magnitude_prune`, `magnitude_prune_svd`]. When using the `cat`
482
+ combination_type, the rank of the resulting adapter is equal to the sum of all adapters ranks (the
483
+ mixed adapter may be too big and result in OOM errors).
484
+ svd_rank (`int`, *optional*):
485
+ Rank of output adapter for svd. If None is provided, the maximum rank of the adapters being merged will be used.
486
+ svd_clamp (`float`, *optional*):
487
+ A quantile threshold for clamping SVD decomposition output. If None is provided, do not perform
488
+ clamping. Defaults to None.
489
+ svd_full_matrices (`bool`, *optional*):
490
+ Controls whether to compute the full or reduced SVD, and consequently, the shape of the returned
491
+ tensors U and Vh. Defaults to True.
492
+ svd_driver (`str`, *optional*):
493
+ Name of the cuSOLVER method to be used. This keyword argument only works when merging on CUDA. Can be
494
+ one of [None, `gesvd`, `gesvdj`, `gesvda`]. For more info please refer to `torch.linalg.svd`
495
+ documentation. Defaults to None.
496
+ density (`float`, *optional*):
497
+ Value between 0 and 1. 0 means all values are pruned and 1 means no values are pruned. Should be used
498
+ with [`ties`, `ties_svd`, `dare_ties`, `dare_linear`, `dare_ties_svd`, `dare_linear_svd`,
499
+ `magnitude_prune`, `magnitude_prune_svd`]
500
+ majority_sign_method (`str`):
501
+ The method, should be one of ["total", "frequency"], to use to get the magnitude of the sign values.
502
+ Should be used with [`ties`, `ties_svd`, `dare_ties`, `dare_ties_svd`]
503
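+
+ Example (a minimal sketch; the adapter names and weights are placeholders, and `linear` requires both adapters
+ to share the same `r`):
+
+ ```py
+ >>> model.add_weighted_adapter(
+ ... adapters=["adapter_a", "adapter_b"],
+ ... weights=[0.7, 0.3],
+ ... adapter_name="merged",
+ ... combination_type="linear",
+ ... )
+ >>> model.set_adapter("merged")
+ ```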
+ """
504
+
505
+ if adapter_name in list(self.peft_config.keys()):
506
+ return
507
+ for adapter in adapters:
508
+ if adapter not in list(self.peft_config.keys()):
509
+ raise ValueError(f"Adapter {adapter} does not exist")
510
+
511
+ # if there is only one adapter, we can only use linear merging
512
+ combination_type = "linear" if len(adapters) == 1 else combination_type
513
+
514
+ adapters_ranks = [self.peft_config[adapter].r for adapter in adapters]
515
+ if combination_type in ("linear", "ties", "dare_ties", "dare_linear", "magnitude_prune"):
516
+ # all adapters ranks should be same, new rank is just this value
517
+ if len(set(adapters_ranks)) != 1:
518
+ raise ValueError(
519
+ "All adapters must have the same r value when using combination_type linear, ties, dare_ties or dare_linear."
520
+ )
521
+ new_rank = adapters_ranks[0]
522
+ elif combination_type == "cat":
523
+ # adapters ranks may be different, new rank is sum of all ranks
524
+ # be careful, because output adapter rank may be really big if mixing a lot of adapters
525
+ new_rank = sum(adapters_ranks)
526
+ elif combination_type.endswith("svd"):
527
+ # new rank is the max of all ranks of the adapters if not provided
528
+ new_rank = svd_rank or max(adapters_ranks)
529
+ else:
530
+ raise ValueError(f"Invalid combination_type: {combination_type}")
531
+
532
+ target_module_types = [type(self.peft_config[adapter].target_modules) for adapter in adapters]
533
+ if not target_module_types:
534
+ raise ValueError(f"Found no adapter matching the names in {adapters}")
535
+ if len(set(target_module_types)) > 1:
536
+ raise ValueError(
537
+ "all adapter configs should follow the same target modules type. "
538
+ "Combining adapters with `target_modules` type being a mix of list/set and string is not supported."
539
+ )
540
+
541
+ if target_module_types[0] == str:
542
+ new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters)
543
+ elif target_module_types[0] == set:
544
+ new_target_modules = reduce(
545
+ operator.or_, (self.peft_config[adapter].target_modules for adapter in adapters)
546
+ )
547
+ else:
548
+ raise TypeError(f"Invalid type {target_module_types[0]} found in target_modules")
549
+
550
+ self.peft_config[adapter_name] = replace(
551
+ self.peft_config[adapters[0]],
552
+ r=new_rank,
553
+ lora_alpha=new_rank,
554
+ target_modules=new_target_modules,
555
+ )
556
+ self.inject_adapter(self.model, adapter_name)
557
+
558
+ # Do we really need that?
559
+ _freeze_adapter(self.model, adapter_name)
560
+
561
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
562
+ for key in key_list:
563
+ _, target, _ = _get_submodules(self.model, key)
564
+ if isinstance(target, LoraLayer):
565
+ if adapter_name in target.lora_A:
566
+ target_lora_A = target.lora_A[adapter_name].weight
567
+ target_lora_B = target.lora_B[adapter_name].weight
568
+ elif adapter_name in target.lora_embedding_A:
569
+ target_lora_A = target.lora_embedding_A[adapter_name]
570
+ target_lora_B = target.lora_embedding_B[adapter_name]
571
+ else:
572
+ continue
573
+
574
+ target_lora_A.data = target_lora_A.data * 0.0
575
+ target_lora_B.data = target_lora_B.data * 0.0
576
+ if combination_type == "cat":
577
+ loras_A, loras_B = [], []
578
+ for adapter, weight in zip(adapters, weights):
579
+ if adapter in target.lora_A:
580
+ current_adapter_lora_A = target.lora_A[adapter].weight
581
+ current_adapter_lora_B = target.lora_B[adapter].weight
582
+ elif adapter in target.lora_embedding_A:
583
+ current_adapter_lora_A = target.lora_embedding_A[adapter]
584
+ current_adapter_lora_B = target.lora_embedding_B[adapter]
585
+ else:
586
+ continue
587
+ loras_A.append(current_adapter_lora_A.data * weight * target.scaling[adapter])
588
+ loras_B.append(current_adapter_lora_B.data)
589
+
590
+ if len(loras_A) == 0:
591
+ raise ValueError("No matching LoRAs found. Please raise an issue on GitHub.")
592
+ loras_A = torch.cat(loras_A, dim=0)
593
+ loras_B = torch.cat(loras_B, dim=1)
594
+ target_lora_A.data[: loras_A.shape[0], :] = loras_A
595
+ target_lora_B.data[:, : loras_B.shape[1]] = loras_B
596
+ elif combination_type in [
597
+ "svd",
598
+ "ties_svd",
599
+ "dare_linear_svd",
600
+ "dare_ties_svd",
601
+ "magnitude_prune_svd",
602
+ ]:
603
+ target_lora_A.data, target_lora_B.data = self._svd_generalized_task_arithmetic_weighted_adapter(
604
+ combination_type,
605
+ adapters,
606
+ weights,
607
+ new_rank,
608
+ target,
609
+ target_lora_A,
610
+ target_lora_B,
611
+ density,
612
+ majority_sign_method,
613
+ svd_clamp,
614
+ full_matrices=svd_full_matrices,
615
+ driver=svd_driver,
616
+ )
617
+ elif combination_type in ["linear", "ties", "dare_linear", "dare_ties", "magnitude_prune"]:
618
+ target_lora_A.data, target_lora_B.data = self._generalized_task_arithmetic_weighted_adapter(
619
+ combination_type, adapters, weights, target, density, majority_sign_method
620
+ )
621
+
622
+ def _svd_generalized_task_arithmetic_weighted_adapter(
623
+ self,
624
+ combination_type,
625
+ adapters,
626
+ weights,
627
+ new_rank,
628
+ target,
629
+ target_lora_A,
630
+ target_lora_B,
631
+ density,
632
+ majority_sign_method,
633
+ clamp=None,
634
+ full_matrices=True,
635
+ driver=None,
636
+ ):
637
+ valid_adapters = []
638
+ valid_weights = []
639
+ is_embedding = any(adapter in target.lora_embedding_A for adapter in adapters)
640
+ for adapter, weight in zip(adapters, weights):
641
+ if adapter in target.lora_A or adapter in target.lora_embedding_A:
642
+ valid_adapters.append(adapter)
643
+ valid_weights.append(weight * target.scaling[adapter])
644
+
645
+ # if no valid adapter, nothing to do
646
+ if len(valid_adapters) == 0:
647
+ raise ValueError("No matching LoRAs found. Please raise an issue on Github.")
648
+ delta_weight = [target.get_delta_weight(adapter) for adapter in valid_adapters]
649
+ valid_weights = torch.tensor(valid_weights).to(delta_weight[0].device)
650
+ if combination_type == "svd":
651
+ delta_weight = task_arithmetic(delta_weight, valid_weights)
652
+ elif combination_type == "ties_svd":
653
+ delta_weight = ties(delta_weight, valid_weights, density, majority_sign_method)
654
+ elif combination_type == "dare_linear_svd":
655
+ delta_weight = dare_linear(delta_weight, valid_weights, density)
656
+ elif combination_type == "dare_ties_svd":
657
+ delta_weight = dare_ties(delta_weight, valid_weights, density, majority_sign_method)
658
+ elif combination_type == "magnitude_prune_svd":
659
+ delta_weight = magnitude_prune(delta_weight, valid_weights, density)
660
+ else:
661
+ raise ValueError(f"Invalid value passed to combination type: {combination_type}")
662
+
663
+ conv2d = isinstance(target, Conv2d)
664
+ if conv2d:
665
+ conv2d_1x1 = target.weight.size()[2:4] == (1, 1)
666
+ if not conv2d_1x1:
667
+ delta_weight = delta_weight.flatten(start_dim=1)
668
+ else:
669
+ delta_weight = delta_weight.squeeze()
670
+ if (hasattr(target, "fan_in_fan_out") and target.fan_in_fan_out) or is_embedding:
671
+ delta_weight = delta_weight.T
672
+
673
+ # based on https://github.com/kohya-ss/sd-scripts/blob/main/networks/svd_merge_lora.py#L114-L131
674
+ U, S, Vh = torch.linalg.svd(delta_weight, full_matrices=full_matrices, driver=driver)
675
+ U = U[:, :new_rank]
676
+ S = S[:new_rank]
677
+ U = U @ torch.diag(S)
678
+ Vh = Vh[:new_rank, :]
679
+ if clamp is not None:
680
+ dist = torch.cat([U.flatten(), Vh.flatten()])
681
+ hi_val = torch.quantile(dist, clamp)
682
+ low_val = -hi_val
683
+ U = U.clamp(low_val, hi_val)
684
+ Vh = Vh.clamp(low_val, hi_val)
685
+ if conv2d:
686
+ U = U.reshape(target_lora_B.data.shape)
687
+ Vh = Vh.reshape(target_lora_A.data.shape)
688
+ return Vh, U
689
+
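+ # Sketch: the combined delta weight is factorized with a truncated SVD, delta ≈ (U @ diag(S)) @ Vh keeping the top
+ # `new_rank` singular values; Vh becomes the new lora_A and U @ diag(S) the new lora_B (reshaped back for conv).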
690
+ def _generalized_task_arithmetic_weighted_adapter(
691
+ self,
692
+ combination_type,
693
+ adapters,
694
+ weights,
695
+ target,
696
+ density,
697
+ majority_sign_method,
698
+ ):
699
+ # account for the weights of the LoRA A and B layers.
700
+ valid_weights = []
701
+ lora_A_deltas = []
702
+ lora_B_deltas = []
703
+ for adapter, weight in zip(adapters, weights):
704
+ if adapter in target.lora_A:
705
+ current_adapter_lora_A = target.lora_A[adapter].weight
706
+ current_adapter_lora_B = target.lora_B[adapter].weight
707
+ elif adapter in target.lora_embedding_A:
708
+ current_adapter_lora_A = target.lora_embedding_A[adapter]
709
+ current_adapter_lora_B = target.lora_embedding_B[adapter]
710
+ else:
711
+ continue
712
+ valid_weights.append(math.sqrt(weight * target.scaling[adapter]))
713
+ lora_A_deltas.append(current_adapter_lora_A.data)
714
+ lora_B_deltas.append(current_adapter_lora_B.data)
715
+ valid_weights = torch.tensor(valid_weights).to(lora_A_deltas[0].device)
716
+ lora_deltas = [lora_A_deltas, lora_B_deltas]
717
+ dtype = lora_A_deltas[0].dtype
718
+ for i, task_tensors in enumerate(lora_deltas):
719
+ if combination_type == "linear":
720
+ lora_deltas[i] = task_arithmetic(task_tensors, valid_weights)
721
+ elif combination_type == "ties":
722
+ lora_deltas[i] = ties(task_tensors, valid_weights, density, majority_sign_method)
723
+ elif combination_type == "dare_linear":
724
+ lora_deltas[i] = dare_linear(task_tensors, valid_weights, density)
725
+ elif combination_type == "dare_ties":
726
+ lora_deltas[i] = dare_ties(task_tensors, valid_weights, density, majority_sign_method)
727
+ elif combination_type == "magnitude_prune":
728
+ lora_deltas[i] = magnitude_prune(task_tensors, valid_weights, density)
729
+ else:
730
+ raise ValueError("Invalid combination type")
731
+ lora_deltas = [delta.to(dtype) for delta in lora_deltas]
732
+ return lora_deltas
733
+
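+ # Note: each adapter's weight enters as sqrt(weight * scaling) on both the A and B factors, so taken on its own an
+ # adapter's B @ A contribution is scaled by weight * scaling while the new adapter keeps its own scaling untouched.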
734
+ def delete_adapter(self, adapter_name: str) -> None:
735
+ """
736
+ Deletes an existing adapter.
737
+
738
+ Args:
739
+ adapter_name (str): Name of the adapter to be deleted.
740
+ """
741
+ if adapter_name not in list(self.peft_config.keys()):
742
+ raise ValueError(f"Adapter {adapter_name} does not exist")
743
+ del self.peft_config[adapter_name]
744
+
745
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
746
+ new_adapter = None
747
+ for key in key_list:
748
+ _, target, _ = _get_submodules(self.model, key)
749
+ if isinstance(target, LoraLayer):
750
+ target.delete_adapter(adapter_name)
751
+ if new_adapter is None:
752
+ new_adapter = target.active_adapters[:]
753
+
754
+ self.active_adapter = new_adapter or []
755
+
756
+ def merge_and_unload(
757
+ self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
758
+ ) -> torch.nn.Module:
759
+ r"""
760
+ This method merges the LoRA layers into the base model. This is needed if someone wants to use the base model
761
+ as a standalone model.
762
+
763
+ Args:
764
+ progressbar (`bool`):
765
+ whether to show a progressbar indicating the unload and merge process
766
+ safe_merge (`bool`):
767
+ whether to activate the safe merging check to check if there is any potential Nan in the adapter
768
+ weights
769
+ adapter_names (`List[str]`, *optional*):
770
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
771
+ to `None`.
772
+ Example:
773
+
774
+ ```py
775
+ >>> from transformers import AutoModelForCausalLM
776
+ >>> from peft import PeftModel
777
+
778
+ >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
779
+ >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
780
+ >>> model = PeftModel.from_pretrained(base_model, peft_model_id)
781
+ >>> merged_model = model.merge_and_unload()
782
+ ```
783
+ """
784
+ return self._unload_and_optionally_merge(
785
+ progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
786
+ )
787
+
788
+ def unload(self) -> torch.nn.Module:
789
+ """
790
+ Gets back the base model by removing all the lora modules without merging. This gives back the original base
791
+ model.
792
+ """
793
+ return self._unload_and_optionally_merge(merge=False)
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/tp_layer.py ADDED
@@ -0,0 +1,230 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import warnings
17
+ from typing import Any, Optional
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.init as init
22
+
23
+ from peft.tuners.tuners_utils import BaseTunerLayer
24
+
25
+ from .layer import LoraLayer
26
+
27
+
28
+ class LoraParallelLinear(nn.Module, LoraLayer):
29
+ """
30
+ When the target layer parallel_linear is RowParallelLinear, in order to keep the input and output shapes
31
+ consistent, we need to split the lora matrix A into rows, and the lora_B at this time should be a complete linear
32
+ layer; In the same way, when the target layer is ColumnParallelLinear, we perform column segmentation on lora_B,
33
+ while lora_A is still a complete linear layer.
34
+ """
35
+
36
+ def __init__(
37
+ self,
38
+ base_layer,
39
+ adapter_name: str,
40
+ backend,
41
+ r: int = 0,
42
+ lora_alpha: int = 1,
43
+ lora_dropout: float = 0.0,
44
+ fan_in_fan_out: bool = False,
45
+ init_lora_weights: bool = True,
46
+ use_rslora: bool = False,
47
+ use_dora: bool = False,
48
+ **kwargs,
49
+ ):
50
+ super().__init__()
51
+ LoraLayer.__init__(self, base_layer=base_layer)
52
+
53
+ if use_dora:
54
+ raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
55
+
56
+ self.backend = backend
57
+ self.is_parallel_a = isinstance(base_layer, backend.RowParallelLinear)
58
+ self.fan_in_fan_out = fan_in_fan_out
59
+ self._active_adapter = adapter_name
60
+
61
+ megatron_config = kwargs["megatron_config"]
62
+ parallel_linear_kwargs = {"megatron_config": megatron_config}
63
+ init_method = init.xavier_normal_
64
+ if hasattr(megatron_config, "init_method"):
65
+ init_method = megatron_config.init_method
66
+ input_is_parallel = True
67
+ gather_output = False
68
+ if isinstance(base_layer, self.backend.RowParallelLinear):
69
+ input_is_parallel = base_layer.input_is_parallel
70
+ else:
71
+ gather_output = base_layer.gather_output
72
+ self.update_layer(
73
+ adapter_name,
74
+ r,
75
+ lora_alpha=lora_alpha,
76
+ lora_dropout=lora_dropout,
77
+ init_lora_weights=init_lora_weights,
78
+ use_rslora=use_rslora,
79
+ use_dora=use_dora,
80
+ init_method=init_method,
81
+ input_is_parallel=input_is_parallel,
82
+ gather_output=gather_output,
83
+ **parallel_linear_kwargs,
84
+ )
85
+
86
+ self.is_target_conv_1d_layer = False
87
+
88
+ def update_layer(
89
+ self,
90
+ adapter_name,
91
+ r,
92
+ lora_alpha,
93
+ lora_dropout,
94
+ init_lora_weights,
95
+ use_rslora,
96
+ use_dora=False,
97
+ init_method=init.xavier_normal_,
98
+ input_is_parallel=True,
99
+ gather_output=False,
100
+ **parallel_linear_kwargs,
101
+ ):
102
+ if r <= 0:
103
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
104
+ self.r[adapter_name] = r
105
+ self.lora_alpha[adapter_name] = lora_alpha
106
+ if lora_dropout > 0.0:
107
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
108
+ else:
109
+ lora_dropout_layer = nn.Identity()
110
+
111
+ self.lora_dropout[adapter_name] = lora_dropout_layer
112
+
113
+ megatron_config = parallel_linear_kwargs["megatron_config"]
114
+ # lora needs to be forced to upgrade to 32-bit precision, otherwise it will overflow
115
+ megatron_config.params_dtype = torch.float32
116
+ if self.is_parallel_a:
117
+ lora_a = self.backend.RowParallelLinear(
118
+ input_size=self.in_features,
119
+ output_size=r,
120
+ bias=False,
121
+ input_is_parallel=input_is_parallel,
122
+ skip_bias_add=True,
123
+ init_method=init_method,
124
+ config=megatron_config,
125
+ )
126
+ lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32)
127
+ else:
128
+ lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32)
129
+ lora_b = self.backend.ColumnParallelLinear(
130
+ input_size=r,
131
+ output_size=self.out_features,
132
+ bias=False,
133
+ gather_output=gather_output,
134
+ init_method=init_method,
135
+ config=megatron_config,
136
+ )
137
+ self.lora_A[adapter_name] = lora_a
138
+ self.lora_B[adapter_name] = lora_b
139
+ if use_rslora:
140
+ self.scaling[adapter_name] = lora_alpha / (r**0.5)
141
+ else:
142
+ self.scaling[adapter_name] = lora_alpha / r
143
+ if init_lora_weights:
144
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
145
+
146
+ weight = getattr(self.get_base_layer(), "weight", None)
147
+ if weight is not None:
148
+ # the layer is already completely initialized, this is an update
149
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
150
+ self.to(weight.device, dtype=weight.dtype)
151
+ else:
152
+ self.to(weight.device)
153
+ self.set_adapter(self.active_adapters)
154
+
155
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any):
156
+ previous_dtype = x.dtype
157
+ # If weight is used for matrix multiplication here, the final aggregation operation of the original
158
+ # parallel_linear layer will be missing, so we need to directly call its forward function to obtain the
159
+ # output of the original parallel_linear layer.
160
+ if self.disable_adapters:
161
+ if self.merged:
162
+ self.unmerge()
163
+ result, bias = self.base_layer(x, *args, **kwargs)
164
+ elif self.merged:
165
+ result, bias = self.base_layer(x, *args, **kwargs)
166
+ else:
167
+ result, bias = self.base_layer(x, *args, **kwargs)
168
+ for active_adapter in self.active_adapters:
169
+ if active_adapter not in self.lora_A.keys():
170
+ continue
171
+ lora_A = self.lora_A[active_adapter]
172
+ lora_B = self.lora_B[active_adapter]
173
+ dropout = self.lora_dropout[active_adapter]
174
+ scaling = self.scaling[active_adapter]
175
+ x = x.to(lora_A.weight.dtype)
176
+
177
+ lora_result = lora_A(dropout(x))
178
+ if isinstance(lora_result, tuple):
179
+ lora_result = lora_result[0]
180
+ lora_result = lora_B(lora_result)
181
+ if isinstance(lora_result, tuple):
182
+ lora_result = lora_result[0]
183
+ lora_result = lora_result * scaling
184
+
185
+ result = result + lora_result
186
+
187
+ result = result.to(previous_dtype)
188
+ return result, bias
189
+
190
+
191
+ def dispatch_megatron(
192
+ target: torch.nn.Module,
193
+ adapter_name: str,
194
+ lora_config,
195
+ **kwargs: Any,
196
+ ) -> Optional[torch.nn.Module]:
197
+ new_module = None
198
+
199
+ if isinstance(target, BaseTunerLayer):
200
+ target_base_layer = target.get_base_layer()
201
+ else:
202
+ target_base_layer = target
203
+
204
+ if lora_config.megatron_config:
205
+ megatron_core = importlib.import_module(lora_config.megatron_core)
206
+ else:
207
+ megatron_core = None
208
+
209
+ if megatron_core and isinstance(
210
+ target_base_layer,
211
+ (megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear),
212
+ ):
213
+ megatron_kwargs = kwargs.copy()
214
+ megatron_config = lora_config.megatron_config
215
+ if isinstance(megatron_config, dict):
216
+ transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig
217
+ megatron_config = transformer_config_class(**lora_config.megatron_config)
218
+ megatron_kwargs["megatron_config"] = megatron_config
219
+ if megatron_kwargs["fan_in_fan_out"]:
220
+ warnings.warn(
221
+ "fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` "
222
+ "or `RowParallelLinear`. "
223
+ "Setting fan_in_fan_out to False."
224
+ )
225
+ megatron_kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
226
+ new_module = LoraParallelLinear(
227
+ base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs
228
+ )
229
+
230
+ return new_module
llmeval-env/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py ADDED
@@ -0,0 +1,428 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import warnings
17
+ from abc import abstractmethod
18
+ from dataclasses import dataclass, field
19
+ from typing import Any, Optional, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+ from tqdm import tqdm
24
+
25
+ from peft.config import PeftConfig
26
+ from peft.utils import (
27
+ ModulesToSaveWrapper,
28
+ _get_submodules,
29
+ )
30
+
31
+ from .tuners_utils import BaseTuner, BaseTunerLayer, check_adapters_to_merge, check_target_module_exists
32
+
33
+
34
+ @dataclass
35
+ class LycorisConfig(PeftConfig):
36
+ r"""
37
+ A base config for LyCORIS like adapters
38
+ """
39
+
40
+ rank_pattern: Optional[dict] = field(
41
+ default_factory=dict,
42
+ metadata={
43
+ "help": (
44
+ "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. "
45
+ "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}"
46
+ )
47
+ },
48
+ )
49
+ alpha_pattern: Optional[dict] = field(
50
+ default_factory=dict,
51
+ metadata={
52
+ "help": (
53
+ "The mapping from layer names or regular expressions to alphas which are different from the default alpha specified by `alpha`. "
54
+ "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32}`"
55
+ )
56
+ },
57
+ )
58
+
59
+
60
+ class LycorisLayer(BaseTunerLayer):
61
+ r"""
62
+ A base layer for LyCORIS like adapters
63
+ """
64
+
65
+ # adapter_layer_names needs to be defined on the child class
66
+ other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout")
67
+
68
+ def __init__(self, base_layer: nn.Module) -> None:
69
+ self.base_layer = base_layer
70
+ self.r = {}
71
+ self.alpha = {}
72
+ self.scaling = {}
73
+ self.rank_dropout = {}
74
+ self.module_dropout = {}
75
+
76
+ # Tuner info
77
+ self._disable_adapters = False
78
+ self.merged_adapters = []
79
+
80
+ @property
81
+ @abstractmethod
82
+ def _available_adapters(self) -> set[str]:
83
+ ...
84
+
85
+ def _init_empty_weights(self, cls, *args, **kwargs) -> None:
86
+ # A helper method that allows initializing the layer of the given class without spending time initializing the
87
+ # model weights. The implementation is inspired by
88
+ # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but this function cannot be used
89
+ # directly.
90
+ # Instead of this approach, it would be possible to bypass the __init__ of the class but that runs the risk of
91
+ # omitting important logic inside that __init__.
92
+ kwargs = kwargs.copy()
93
+ final_device = kwargs.pop("device", "cpu")
94
+ cls.__init__(self, *args, device="meta", **kwargs)
95
+ self.to_empty(device=final_device)
96
+
97
+ @abstractmethod
98
+ def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs):
99
+ ...
100
+
101
+ # TODO: refactor LoRA to use the same approach
102
+ @abstractmethod
103
+ def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
104
+ """Activations added on top of the base layer output (i.e. after the base layer forward pass)"""
105
+
106
+ @abstractmethod
107
+ def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
108
+ ...
109
+
110
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
111
+ """
112
+ Merge the active adapter weights into the base weights
113
+
114
+ Args:
115
+ safe_merge (`bool`, *optional*):
116
+ If `True`, the merge operation will be performed in a copy of the original weights and checked for NaNs
117
+ before merging the weights. This is useful if you want to check if the merge operation will produce
118
+ NaNs. Defaults to `False`.
119
+ adapter_names (`List[str]`, *optional*):
120
+ The list of adapter names that should be merged. If `None`, all active adapters will be merged.
121
+ Defaults to `None`.
122
+ """
123
+ adapter_names = check_adapters_to_merge(self, adapter_names)
124
+ if not adapter_names:
125
+ # no adapter to merge
126
+ return
127
+
128
+ for active_adapter in adapter_names:
129
+ if active_adapter in self._available_adapters:
130
+ base_layer = self.get_base_layer()
131
+ if safe_merge:
132
+ orig_weights = base_layer.weight.data.clone()
133
+ orig_weights += self.get_delta_weight(active_adapter)
134
+
135
+ if not torch.isfinite(orig_weights).all():
136
+ raise ValueError(
137
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
138
+ )
139
+
140
+ base_layer.weight.data = orig_weights
141
+ else:
142
+ base_layer.weight.data += self.get_delta_weight(active_adapter)
143
+ self.merged_adapters.append(active_adapter)
144
+
145
+ @abstractmethod
146
+ def reset_adapter_parameters(self, adapter_name: str):
147
+ ...
148
+
149
+ def set_scale(self, adapter, scale):
150
+ if adapter not in self._available_adapters:
151
+ # Ignore the case where the adapter is not in the layer
152
+ return
153
+ self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter]
154
+
155
+ def scale_layer(self, scale: float) -> None:
156
+ if scale == 1:
157
+ return
158
+
159
+ for active_adapter in self.active_adapters:
160
+ if active_adapter not in self._available_adapters:
161
+ continue
162
+
163
+ self.scaling[active_adapter] *= scale
164
+
165
+ def unmerge(self) -> None:
166
+ """
167
+ This method unmerges all merged adapter layers from the base weights.
168
+ """
169
+ if not self.merged:
170
+ warnings.warn("Already unmerged. Nothing to do.")
171
+ return
172
+ while len(self.merged_adapters) > 0:
173
+ active_adapter = self.merged_adapters.pop()
174
+ if active_adapter in self._available_adapters:
175
+ self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
176
+
177
+ def unscale_layer(self, scale=None) -> None:
178
+ for active_adapter in self.active_adapters:
179
+ if active_adapter not in self._available_adapters:
180
+ continue
181
+
182
+ if scale is None:
183
+ self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter]
184
+ else:
185
+ self.scaling[active_adapter] /= scale
186
+
187
+ @abstractmethod
188
+ def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs):
189
+ ...
190
+
191
+
192
+ class LycorisTuner(BaseTuner):
193
+ r"""
194
+ A base tuner for LyCORIS like adapters
195
+ """
196
+
197
+ prefix: str
198
+ layers_mapping: dict[type[torch.nn.Module], type[LycorisLayer]]
199
+
200
+ def __init__(self, model, config, adapter_name):
201
+ super().__init__(model, config, adapter_name)
202
+
203
+ def __getattr__(self, name: str):
204
+ """Forward missing attributes to the wrapped module."""
205
+ try:
206
+ return super().__getattr__(name) # defer to nn.Module's logic
207
+ except AttributeError:
208
+ return getattr(self.model, name)
209
+
210
+ @staticmethod
211
+ def _check_target_module_exists(config, key):
212
+ return check_target_module_exists(config, key)
213
+
214
+ @abstractmethod
215
+ def _create_and_replace(
216
+ self,
217
+ config: LycorisConfig,
218
+ adapter_name: str,
219
+ target: Union[LycorisLayer, nn.Module],
220
+ target_name,
221
+ parent,
222
+ current_key,
223
+ ):
224
+ ...
225
+
226
+ @classmethod
227
+ def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer:
228
+ # Find corresponding subtype of provided target module
229
+ new_module_cls = None
230
+ for subtype, target_cls in cls.layers_mapping.items():
231
+ if (
232
+ hasattr(target, "base_layer")
233
+ and isinstance(target.get_base_layer(), subtype)
234
+ and isinstance(target, BaseTunerLayer)
235
+ ):
236
+ # nested tuner layers are allowed
237
+ new_module_cls = target_cls
238
+ break
239
+ elif isinstance(target, subtype):
240
+ new_module_cls = target_cls
241
+ break
242
+
243
+ # We didn't find corresponding type, so adapter for this layer is not supported
244
+ if new_module_cls is None:
245
+ supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys())
246
+ raise ValueError(
247
+ f"Target module of type {type(target)} not supported, "
248
+ f"currently only adapters for {supported_modules} are supported"
249
+ )
250
+
251
+ if isinstance(target, BaseTunerLayer):
252
+ target_base_layer = target.get_base_layer()
253
+ else:
254
+ target_base_layer = target
255
+
256
+ if isinstance(target_base_layer, torch.nn.Conv2d):
257
+ new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
258
+ elif isinstance(target_base_layer, torch.nn.Linear):
259
+ new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
260
+ else:
261
+ supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys())
262
+ raise ValueError(
263
+ f"Target module of type {type(target)} not supported, "
264
+ f"currently only adapters for {supported_modules} are supported"
265
+ )
266
+
267
+ return new_module
268
+
269
+ def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
270
+ for n, p in model.named_parameters():
271
+ if self.prefix not in n:
272
+ p.requires_grad = False
273
+
274
+ @staticmethod
275
+ def _prepare_adapter_config(peft_config, model_config):
276
+ if peft_config.target_modules is None:
277
+ raise ValueError("Please specify `target_modules` in `peft_config`")
278
+ return peft_config
279
+
280
+ def _replace_module(self, parent, child_name, new_module, child):
281
+ setattr(parent, child_name, new_module)
282
+ # It's not necessary to set requires_grad here, as that is handled by
283
+ # _mark_only_adapters_as_trainable
284
+
285
+ if not hasattr(new_module, "base_layer"):
286
+ new_module.weight = child.weight
287
+ if hasattr(child, "bias"):
288
+ new_module.bias = child.bias
289
+
290
+ if getattr(child, "state", None) is not None:
291
+ if hasattr(new_module, "base_layer"):
292
+ new_module.base_layer.state = child.state
293
+ else:
294
+ new_module.state = child.state
295
+ new_module.to(child.weight.device)
296
+
297
+ # dispatch to correct device
298
+ for name, module in new_module.named_modules():
299
+ if self.prefix in name:
300
+ module.to(child.weight.device)
301
+
302
+ def _set_adapter_layers(self, enabled=True):
303
+ for module in self.model.modules():
304
+ if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
305
+ module.enable_adapters(enabled)
306
+
307
+ def _unload_and_optionally_merge(
308
+ self,
309
+ merge: bool = True,
310
+ progressbar: bool = False,
311
+ safe_merge: bool = False,
312
+ adapter_names: Optional[list[str]] = None,
313
+ ):
314
+ if merge:
315
+ if getattr(self.model, "quantization_method", None) == "gptq":
316
+ raise ValueError("Cannot merge LyCORIS layers when the model is gptq quantized")
317
+
318
+ self._unloading_checks(adapter_names)
319
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
320
+ desc = "Unloading " + ("and merging " if merge else "") + "model"
321
+ for key in tqdm(key_list, disable=not progressbar, desc=desc):
322
+ try:
323
+ parent, target, target_name = _get_submodules(self.model, key)
324
+ except AttributeError:
325
+ continue
326
+
327
+ if hasattr(target, "base_layer"):
328
+ if merge:
329
+ target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
330
+ self._replace_module(parent, target_name, target.get_base_layer(), target)
331
+ elif isinstance(target, ModulesToSaveWrapper):
332
+ # save any additional trainable modules part of `modules_to_save`
333
+ new_module = target.modules_to_save[target.active_adapter]
334
+ if hasattr(new_module, "base_layer"):
335
+ # check if the module is itself a tuner layer
336
+ if merge:
337
+ new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
338
+ new_module = new_module.get_base_layer()
339
+ setattr(parent, target_name, new_module)
340
+
341
+ return self.model
342
+
343
+ def enable_adapter_layers(self) -> None:
344
+ """Enable all adapters.
345
+
346
+ Call this if you have previously disabled all adapters and want to re-enable them.
347
+ """
348
+ self._set_adapter_layers(enabled=True)
349
+
350
+ def disable_adapter_layers(self) -> None:
351
+ """Disable all adapters.
352
+
353
+ When disabling all adapters, the model output corresponds to the output of the base model.
354
+ """
355
+ self._set_adapter_layers(enabled=False)
356
+
357
+ def merge_and_unload(
358
+ self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
359
+ ) -> torch.nn.Module:
360
+ r"""
361
+ This method merges the adapter layers into the base model. This is needed if someone wants to use the base
362
+ model as a standalone model.
363
+
364
+ Args:
365
+ progressbar (`bool`):
366
+ whether to show a progressbar indicating the unload and merge process
367
+ safe_merge (`bool`):
368
+ whether to activate the safe merging check to test for potential NaNs in the adapter
369
+ weights
370
+ adapter_names (`List[str]`, *optional*):
371
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
372
+ to `None`.
373
+
374
+ """
375
+ return self._unload_and_optionally_merge(
376
+ progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
377
+ )
378
+
379
+ def unload(self) -> torch.nn.Module:
380
+ """
381
+ Gets back the base model by removing all the adapter modules without merging. This gives back the original base
382
+ model.
383
+ """
384
+ return self._unload_and_optionally_merge(merge=False)
385
+
386
+ def set_adapter(self, adapter_name: str | list[str]) -> None:
387
+ """Set the active adapter(s).
388
+
389
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
390
+ not desired, use the following code.
391
+
392
+ ```py
393
+ >>> for name, param in model_peft.named_parameters():
394
+ ... if ...: # some check on name (ex. if 'lora' in name)
395
+ ... param.requires_grad = False
396
+ ```
397
+
398
+ Args:
399
+ adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
400
+ """
401
+ for module in self.model.modules():
402
+ if isinstance(module, LycorisLayer):
403
+ if module.merged:
404
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
405
+ module.unmerge()
406
+ module.set_adapter(adapter_name)
407
+
408
+ def delete_adapter(self, adapter_name: str) -> None:
409
+ """
410
+ Deletes an existing adapter.
411
+
412
+ Args:
413
+ adapter_name (`str`): Name of the adapter to be deleted.
414
+ """
415
+ if adapter_name not in list(self.peft_config.keys()):
416
+ raise ValueError(f"Adapter {adapter_name} does not exist")
417
+ del self.peft_config[adapter_name]
418
+
419
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
420
+ new_adapter = None
421
+ for key in key_list:
422
+ _, target, _ = _get_submodules(self.model, key)
423
+ if isinstance(target, LycorisLayer):
424
+ target.delete_adapter(adapter_name)
425
+ if new_adapter is None:
426
+ new_adapter = target.active_adapters[:]
427
+
428
+ self.active_adapter = new_adapter or []
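Since `LycorisTuner` is only ever used through concrete subclasses, a minimal usage sketch with the LoHa adapter may help; the base checkpoint and target module names are illustrative assumptions, chosen so the targeted layers are plain `nn.Linear` modules (one of the types `layers_mapping` supports).

```py
# A minimal sketch, assuming an OPT-style base model whose attention projections are
# nn.Linear layers; the merge/unmerge logic above is reached through these calls.
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, TaskType, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = LoHaConfig(r=8, alpha=16, target_modules=["q_proj", "v_proj"], task_type=TaskType.CAUSAL_LM)
peft_model = get_peft_model(base, config)

peft_model.set_adapter("default")        # delegates to LycorisTuner.set_adapter
merged = peft_model.merge_and_unload()   # folds the adapter deltas into the base weights
```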
llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .model import COMPATIBLE_TUNER_TYPES, MixedModel
16
+
17
+
18
+ __all__ = ["COMPATIBLE_TUNER_TYPES", "MixedModel"]
llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (292 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/__pycache__/model.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/mixed/model.py ADDED
@@ -0,0 +1,339 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import warnings
17
+ from typing import Any, Optional, Union
18
+
19
+ from torch import nn
20
+ from tqdm import tqdm
21
+
22
+ from peft.tuners import adalora, loha, lokr, lora, oft
23
+ from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
24
+ from peft.utils import (
25
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
26
+ ModulesToSaveWrapper,
27
+ PeftType,
28
+ _get_submodules,
29
+ get_auto_gptq_quant_linear,
30
+ )
31
+
32
+
33
+ # Collection of constants used for all tuners
34
+ COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT)
35
+ PREFIXES = [lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix]
36
+ Configs = Union[lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig]
37
+ Layers = (lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer)
38
+
39
+
40
+ class MixedModel(BaseTuner):
41
+ """
42
+ A class that allows mixing different types of adapters within a single model.
43
+
44
+ Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument
45
+ `mixed=True`.
46
+
47
+ Args:
48
+ model (:obj:`nn.Module`):
49
+ The model to be tuned.
50
+ config (:obj:`PeftConfig`):
51
+ The config of the model to be tuned. The adapter type must be compatible.
52
+ adapter_name (:obj:`str`):
53
+ The name of the first adapter.
54
+ """
55
+
56
+ def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None:
57
+ super().__init__(model, config, adapter_name)
58
+
59
+ def _check_new_adapter_config(self, config: Configs) -> None:
60
+ """
61
+ A helper method to check the config when a new adapter is being added.
62
+
63
+ Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
64
+
65
+ """
66
+ if not isinstance(config, Configs.__args__):
67
+ raise ValueError(
68
+ f"{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}."
69
+ )
70
+
71
+ biases = (getattr(config, "bias", None) for config in self.peft_config.values())  # iterate config objects, not adapter names
72
+ biases = [bias for bias in biases if bias not in (None, "none")]
73
+ if len(biases) > 1:
74
+ raise ValueError(
75
+ f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
76
+ "set bias to 'none' for all adapters."
77
+ )
78
+
79
+ @staticmethod
80
+ def _check_target_module_exists(config: Configs, key: str):
81
+ return check_target_module_exists(config, key)
82
+
83
+ def _create_and_replace(
84
+ self,
85
+ config: Configs,
86
+ *args: Any,
87
+ **kwargs: Any,
88
+ ) -> None:
89
+ if isinstance(config, adalora.AdaLoraConfig):
90
+ adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs)
91
+ elif isinstance(config, lora.LoraConfig):
92
+ lora.LoraModel._create_and_replace(self, config, *args, **kwargs)
93
+ elif isinstance(config, loha.LoHaConfig):
94
+ loha.LoHaModel._create_and_replace(self, config, *args, **kwargs)
95
+ elif isinstance(config, lokr.LoKrConfig):
96
+ lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs)
97
+ elif isinstance(config, oft.OFTConfig):
98
+ oft.OFTModel._create_and_replace(self, config, *args, **kwargs)
99
+ else:
100
+ raise ValueError(f"Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.")
101
+
102
+ def _replace_module(self, parent, child_name, new_module, child) -> None:
103
+ setattr(parent, child_name, new_module)
104
+ # It's not necessary to set requires_grad here, as that is handled by
105
+ # _mark_only_adapters_as_trainable
106
+
107
+ # child layer wraps the original module, unpack it
108
+ if hasattr(child, "base_layer"):
109
+ child = child.get_base_layer()
110
+ elif hasattr(child, "quant_linear_module"):
111
+ # TODO maybe not necessary to have special treatment?
112
+ child = child.quant_linear_module
113
+
114
+ if not hasattr(new_module, "base_layer"):
115
+ new_module.weight = child.weight
116
+ if hasattr(child, "bias"):
117
+ new_module.bias = child.bias
118
+
119
+ if getattr(child, "state", None) is not None:
120
+ if hasattr(new_module, "base_layer"):
121
+ new_module.base_layer.state = child.state
122
+ else:
123
+ new_module.state = child.state
124
+ new_module.to(child.weight.device)
125
+
126
+ # dispatch to correct device
127
+ for name, module in new_module.named_modules():
128
+ if any(prefix in name for prefix in PREFIXES):
129
+ module.to(child.weight.device)
130
+ if "ranknum" in name:
131
+ module.to(child.weight.device)
132
+
133
+ def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
134
+ for n, p in model.named_parameters():
135
+ if not any(prefix in n for prefix in PREFIXES):
136
+ p.requires_grad = False
137
+
138
+ for active_adapter in self.active_adapters:
139
+ bias = getattr(self.peft_config[active_adapter], "bias", "none")
140
+ if bias == "none":
141
+ continue
142
+
143
+ if bias == "all":
144
+ for n, p in model.named_parameters():
145
+ if "bias" in n:
146
+ p.requires_grad = True
147
+ elif bias == "lora_only":
148
+ # TODO: check if this is needed for other supported types
149
+ for m in model.modules():
150
+ if isinstance(m, Layers) and hasattr(m, "bias") and m.bias is not None:
151
+ m.bias.requires_grad = True
152
+ else:
153
+ raise ValueError(f"Requested bias: {bias}, is not implemented.")
154
+
155
+ @staticmethod
156
+ def _create_new_module(config, adapter_name, target, **kwargs):
157
+ gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
158
+ AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
159
+ if (gptq_quantization_config is not None) or (AutoGPTQQuantLinear is not None):
160
+ raise ValueError(f"GPTQ quantization not supported for {config.peft_type.value} (yet).")
161
+
162
+ loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
163
+ loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
164
+ if loaded_in_8bit or loaded_in_4bit:
165
+ raise ValueError(f"8bit and 4bit quantization not supported for {config.peft_type.value} (yet).")
166
+
167
+ if isinstance(config, adalora.AdaLoraConfig):
168
+ new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs)
169
+ elif isinstance(config, lora.LoraConfig):
170
+ new_module = lora.LoraModel._create_new_module(config, adapter_name, target, **kwargs)
171
+ elif isinstance(config, loha.LoHaConfig):
172
+ new_module = loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs)
173
+ elif isinstance(config, lokr.LoKrConfig):
174
+ new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs)
175
+ elif isinstance(config, oft.OFTConfig):
176
+ new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs)
177
+ else:
178
+ raise ValueError(f"Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.")
179
+ return new_module
180
+
181
+ def __getattr__(self, name: str):
182
+ """Forward missing attributes to the wrapped module."""
183
+ try:
184
+ return super().__getattr__(name) # defer to nn.Module's logic
185
+ except AttributeError:
186
+ return getattr(self.model, name)
187
+
188
+ def _set_adapter_layers(self, enabled=True):
189
+ for module in self.model.modules():
190
+ if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
191
+ module.enable_adapters(enabled)
192
+
193
+ def enable_adapter_layers(self):
194
+ self._set_adapter_layers(enabled=True)
195
+
196
+ def disable_adapter_layers(self):
197
+ for active_adapter in self.active_adapters:
198
+ val = getattr(self.peft_config[active_adapter], "bias", "none")
199
+ if val != "none":
200
+ msg = (
201
+ f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same "
202
+ "output as the base model would without adaptation."
203
+ )
204
+ warnings.warn(msg)
205
+ self._set_adapter_layers(enabled=False)
206
+
207
+ def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
208
+ for module in self.model.modules():
209
+ if isinstance(module, Layers):
210
+ if module.merged:
211
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
212
+ module.unmerge()
213
+ module.set_adapter(adapter_name)
214
+ self.active_adapter = adapter_name
215
+
216
+ @staticmethod
217
+ def _prepare_adapter_config(peft_config, model_config):
218
+ if peft_config.target_modules is None:
219
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
220
+ raise ValueError("Please specify `target_modules` in `peft_config`")
221
+
222
+ peft_config.target_modules = set(
223
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
224
+ )
225
+ return peft_config
226
+
227
+ def _unload_and_optionally_merge(
228
+ self,
229
+ merge=True,
230
+ progressbar: bool = False,
231
+ safe_merge: bool = False,
232
+ adapter_names: Optional[list[str]] = None,
233
+ ):
234
+ if merge:
235
+ if getattr(self.model, "quantization_method", None) == "gptq":
236
+ raise ValueError("Cannot merge layers when the model is gptq quantized")
237
+
238
+ def merge_recursively(module):
239
+ # helper function to recursively merge the base_layer of the target
240
+ path = []
241
+ layer = module
242
+ while hasattr(layer, "base_layer"):
243
+ path.append(layer)
244
+ layer = layer.base_layer
245
+ for layer_before, layer_after in zip(path[:-1], path[1:]):
246
+ layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names)
247
+ layer_before.base_layer = layer_after.base_layer
248
+ module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
249
+
250
+ key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)]
251
+ desc = "Unloading " + ("and merging " if merge else "") + "model"
252
+
253
+ for key in tqdm(key_list, disable=not progressbar, desc=desc):
254
+ try:
255
+ parent, target, target_name = _get_submodules(self.model, key)
256
+ except AttributeError:
257
+ continue
258
+
259
+ if hasattr(target, "base_layer"):
260
+ if merge:
261
+ merge_recursively(target)
262
+ self._replace_module(parent, target_name, target.get_base_layer(), target)
263
+ elif isinstance(target, ModulesToSaveWrapper):
264
+ # save any additional trainable modules part of `modules_to_save`
265
+ new_module = target.modules_to_save[target.active_adapter]
266
+ if hasattr(new_module, "base_layer"):
267
+ # check if the module is itself a tuner layer
268
+ if merge:
269
+ new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
270
+ new_module = new_module.get_base_layer()
271
+ setattr(parent, target_name, new_module)
272
+
273
+ return self.model
274
+
275
+ def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None:
276
+ raise NotImplementedError(f"Weighted adapters are not supported for {self.__class__.__name__} (yet).")
277
+
278
+ def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
279
+ """
280
+ Deletes an existing adapter.
281
+
282
+ Args:
283
+ adapter_name (Union[str, list[str]]): Name of the adapter(s) to delete.
284
+ """
285
+ if isinstance(adapter_name, str):
286
+ adapter_names = [adapter_name]
287
+ else:
288
+ adapter_names = adapter_name
289
+
290
+ mismatched = set(adapter_names) - set(self.peft_config.keys())
291
+ if mismatched:
292
+ raise ValueError(
293
+ f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
294
+ )
295
+
296
+ for adapter_name in adapter_names:
297
+ del self.peft_config[adapter_name]
298
+
299
+ key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)]
300
+ new_adapter = None
301
+ for key in key_list:
302
+ _, target, _ = _get_submodules(self.model, key)
303
+ if isinstance(target, BaseTunerLayer):
304
+ target.delete_adapter(adapter_name)
305
+ if new_adapter is None:
306
+ new_adapter = target.active_adapters[:]
307
+
308
+ self.active_adapter = new_adapter or []
309
+
310
+ def merge_and_unload(
311
+ self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
312
+ ) -> nn.Module:
313
+ r"""
314
+ This method merges the adapter layers into the base model. This is needed if someone wants to use the base model as a
315
+ standalone model.
316
+
317
+ Args:
318
+ progressbar (`bool`):
319
+ whether to show a progressbar indicating the unload and merge process
320
+ safe_merge (`bool`):
321
+ whether to activate the safe merging check to test for potential NaNs in the adapter
322
+ weights
323
+ adapter_names (`List[str]`, *optional*):
324
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
325
+ to `None`.
326
+ """
327
+ return self._unload_and_optionally_merge(
328
+ progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
329
+ )
330
+
331
+ def unload(self) -> nn.Module:
332
+ """
333
+ Gets back the base model by removing all the adapter modules without merging. This gives back the original base
334
+ model.
335
+ """
336
+ return self._unload_and_optionally_merge(merge=False)
337
+
338
+ def generate(self, *args: Any, **kwargs: Any):
339
+ return self.model.generate(*args, **kwargs)
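As the class docstring notes, `MixedModel` is normally reached through `get_peft_model(..., mixed=True)` rather than instantiated directly. A hedged sketch of that flow follows; the base checkpoint, module names, and hyperparameters are illustrative assumptions.

```py
# A minimal sketch of mixing two adapter types on one model; values are illustrative.
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
lora_cfg = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
loha_cfg = LoHaConfig(r=8, alpha=16, target_modules=["q_proj", "v_proj"])

model = get_peft_model(base, lora_cfg, adapter_name="lora", mixed=True)  # wraps a MixedModel
model.add_adapter("loha", loha_cfg)
model.set_adapter(["lora", "loha"])  # both adapters contribute to the forward pass
```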
llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
16
+ from .model import MultitaskPromptEmbedding
17
+
18
+
19
+ __all__ = ["MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "MultitaskPromptEmbedding"]
llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (392 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc ADDED
Binary file (2.43 kB). View file
 
llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py ADDED
@@ -0,0 +1,61 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import enum
16
+ from dataclasses import dataclass, field
17
+ from typing import Optional, Union
18
+
19
+ from peft.tuners.prompt_tuning import PromptTuningConfig
20
+ from peft.utils import PeftType
21
+
22
+
23
+ class MultitaskPromptTuningInit(str, enum.Enum):
24
+ # initialize prompt with text
25
+ TEXT = "TEXT"
26
+ # initialize prompt with random matrix
27
+ RANDOM = "RANDOM"
28
+ # average the prefix and column matrices obtained during source training
29
+ AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
30
+ # pick prefix and column matrices for a particular task obtained during source training
31
+ EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
32
+ # only use the prompt embeddings trained during source training
33
+ ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"
34
+
35
+
36
+ @dataclass
37
+ class MultitaskPromptTuningConfig(PromptTuningConfig):
38
+ prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
39
+ default=MultitaskPromptTuningInit.RANDOM,
40
+ metadata={
41
+ "help": (
42
+ "How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
43
+ "EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
44
+ ),
45
+ },
46
+ )
47
+ prompt_tuning_init_state_dict_path: Optional[str] = field(
48
+ default=None,
49
+ metadata={
50
+ "help": (
51
+ "The path of source state dict. This is required when training the downstream target prompt from "
52
+ "the pretrained source prompt"
53
+ ),
54
+ },
55
+ )
56
+ prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
57
+ num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"})
58
+ num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})
59
+
60
+ def __post_init__(self):
61
+ self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
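A hedged example of a source-training configuration built from this config class; the task type and numeric values are illustrative assumptions, and the imports follow the package paths added in this diff.

```py
# A minimal sketch; RANDOM init avoids needing a source state dict or tokenizer.
from peft.tuners.multitask_prompt_tuning import (
    MultitaskPromptTuningConfig,
    MultitaskPromptTuningInit,
)
from peft.utils import TaskType

source_config = MultitaskPromptTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    prompt_tuning_init=MultitaskPromptTuningInit.RANDOM,  # TEXT or RANDOM for source training
    num_virtual_tokens=50,     # inherited from PromptTuningConfig
    num_tasks=4,               # number of source tasks sharing the prompt
    num_ranks=8,               # rank of the per-task (cols @ rows) factors
)
```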
llmeval-env/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py ADDED
@@ -0,0 +1,115 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+
17
+ from peft.tuners.prompt_tuning import PromptEmbedding
18
+ from peft.utils import TaskType
19
+
20
+ from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
21
+
22
+
23
+ # This code is adapted from the paper: https://arxiv.org/abs/2303.02861 and
24
+ # constitutes the work done at MIT-IBM Watson Research Lab.
25
+
26
+
27
+ class MultitaskPromptEmbedding(PromptEmbedding):
28
+ def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings):
29
+ super().__init__(config, word_embeddings)
30
+
31
+ self.num_tasks = config.num_tasks
32
+ self.num_ranks = config.num_ranks
33
+ self.num_virtual_tokens = config.num_virtual_tokens
34
+
35
+ self.num_transformer_submodules = config.num_transformer_submodules
36
+ if self.num_transformer_submodules is None:
37
+ self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
38
+
39
+ self.token_dim = config.token_dim
40
+
41
+ total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules
42
+
43
+ self.prefix_task_cols = torch.nn.Parameter(
44
+ torch.normal(
45
+ mean=0,
46
+ std=0.02,
47
+ size=(self.num_tasks, total_virtual_tokens, self.num_ranks),
48
+ )
49
+ )
50
+ self.prefix_task_rows = torch.nn.Parameter(
51
+ torch.normal(
52
+ mean=0,
53
+ std=0.02,
54
+ size=(self.num_tasks, self.num_ranks, self.token_dim),
55
+ )
56
+ )
57
+
58
+ if config.prompt_tuning_init in [
59
+ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
60
+ MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
61
+ MultitaskPromptTuningInit.ONLY_SOURCE_SHARED,
62
+ ]:
63
+ if config.prompt_tuning_init_state_dict_path is None:
64
+ raise ValueError(
65
+ f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} "
66
+ "init method"
67
+ )
68
+
69
+ # TODO: There should be an option for safetensors
70
+ state_dict: dict = torch.load(
71
+ config.prompt_tuning_init_state_dict_path,
72
+ map_location=word_embeddings.weight.device,
73
+ )
74
+
75
+ if config.prompt_tuning_init in [
76
+ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
77
+ MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
78
+ ]:
79
+ prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"]
80
+ prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"]
81
+
82
+ if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS:
83
+ prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True)
84
+ prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True)
85
+ elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK:
86
+ prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0)
87
+ prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0)
88
+
89
+ state_dict = {
90
+ "embedding.weight": state_dict["prompt_embeddings"],
91
+ "prefix_task_cols": prefix_task_cols_,
92
+ "prefix_task_rows": prefix_task_rows_,
93
+ }
94
+
95
+ self.load_state_dict(state_dict, strict=True)
96
+ elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED:
97
+ state_dict = {
98
+ "embedding.weight": state_dict["prompt_embeddings"],
99
+ }
100
+
101
+ self.load_state_dict(state_dict, strict=False)
102
+
103
+ def forward(self, indices, task_ids):
104
+ if task_ids is None:
105
+ raise ValueError("task_ids cannot be None")
106
+
107
+ prompt_embeddings = self.embedding(indices)
108
+
109
+ task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids)
110
+ task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids)
111
+ task_prompts = torch.matmul(task_cols, task_rows)
112
+
113
+ prompt_embeddings *= task_prompts
114
+
115
+ return prompt_embeddings
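The forward pass above builds a per-task low-rank scaling of the shared prompt: `task_cols @ task_rows` yields one `(tokens, dim)` matrix per example, which elementwise rescales the shared prompt embeddings. A standalone shape sketch (sizes are illustrative):

```py
# Illustrative shapes for the per-task (cols @ rows) rescaling performed in forward().
import torch

batch, total_virtual_tokens, token_dim = 2, 100, 768
num_tasks, num_ranks = 4, 8

prompt_embeddings = torch.randn(batch, total_virtual_tokens, token_dim)  # self.embedding(indices)
prefix_task_cols = torch.randn(num_tasks, total_virtual_tokens, num_ranks)
prefix_task_rows = torch.randn(num_tasks, num_ranks, token_dim)
task_ids = torch.tensor([0, 3])

task_cols = torch.index_select(prefix_task_cols, 0, task_ids)  # (batch, tokens, ranks)
task_rows = torch.index_select(prefix_task_rows, 0, task_ids)  # (batch, ranks, dim)
task_prompts = torch.matmul(task_cols, task_rows)              # (batch, tokens, dim)
out = prompt_embeddings * task_prompts                         # elementwise per-task rescaling
assert out.shape == (batch, total_virtual_tokens, token_dim)
```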