applied-ai-018 committed
Commit 8fb23b1 · verified · 1 Parent(s): 1d0bd1d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/16.attention.dense.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step40/zero/24.post_attention_layernorm.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step40/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  8. ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/exp_avg.pt +3 -0
  9. ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  10. venv/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py +37 -0
  11. venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py +145 -0
  18. venv/lib/python3.10/site-packages/peft/tuners/adalora/config.py +52 -0
  19. venv/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py +72 -0
  20. venv/lib/python3.10/site-packages/peft/tuners/adalora/layer.py +347 -0
  21. venv/lib/python3.10/site-packages/peft/tuners/adalora/model.py +346 -0
  22. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py +19 -0
  23. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py +80 -0
  29. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py +128 -0
  30. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py +161 -0
  31. venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py +121 -0
  32. venv/lib/python3.10/site-packages/peft/tuners/loha/__init__.py +20 -0
  33. venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/peft/tuners/loha/config.py +121 -0
  38. venv/lib/python3.10/site-packages/peft/tuners/loha/layer.py +375 -0
  39. venv/lib/python3.10/site-packages/peft/tuners/loha/model.py +114 -0
  40. venv/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py +20 -0
  41. venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/peft/tuners/lokr/config.py +127 -0
  46. venv/lib/python3.10/site-packages/peft/tuners/lokr/layer.py +409 -0
  47. venv/lib/python3.10/site-packages/peft/tuners/lokr/model.py +115 -0
  48. venv/lib/python3.10/site-packages/peft/tuners/lora/__init__.py +37 -0
  49. venv/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py +100 -0
  50. venv/lib/python3.10/site-packages/peft/tuners/lora/awq.py +108 -0
ckpts/universal/global_step40/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae69ae11da19eb3f0f90e38b82735b1b4d6b61687595d3b2cb49fc43cfe375bf
+ size 33555627
ckpts/universal/global_step40/zero/16.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50a914a10bf528ac25f73ba96084c0de9bea41ec4b9b740c04f018ed8d7b89d8
+ size 16778317
ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb2aeeebc268f877260c4feeefda1580ed4594768670f5d9eca792067e80e2d5
+ size 33555627
ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36a9dc3593d1611889dc3563785301f13bb18607da4ef980650fed26c3da7fbe
+ size 33555533
ckpts/universal/global_step40/zero/24.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f062c10e2d5672e81b675d65059cc85268f9d1191fafac540a991c646c9e658f
+ size 9372
ckpts/universal/global_step40/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43dbb9380349345c0c2f05c96223a0a9e2329da9ed5abccef4d393c32b97801f
+ size 9387
ckpts/universal/global_step40/zero/6.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad3b5928a3a937caab1f83c2dbacf5a509a73bc3d28c972d55025464fed08d9a
+ size 33555533
ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d04241237a4405155f28734b4ce383698b7057cee07580775a0a0440a67e8b5
+ size 9372
ckpts/universal/global_step40/zero/8.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8968dec1c28b7739a876894a92ecbf08b36a20e52c3327b016d37119bf2676cf
+ size 9387
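The nine checkpoint entries above are Git LFS pointer files: the repository stores only the `version`/`oid`/`size` stanza, while the actual tensor blobs (judging by the paths, Adam `exp_avg`/`exp_avg_sq` moments and `fp32` master weights from a ZeRO universal checkpoint) live in LFS storage. A minimal sketch of inspecting one shard after the real blob has been fetched with `git lfs pull`; the chosen path and the assumption that the file deserializes to a single tensor are illustrative, not confirmed by this diff:

```python
import torch

# Hypothetical shard; `git lfs pull` must have replaced the pointer with the real blob.
shard_path = "ckpts/universal/global_step40/zero/24.post_attention_layernorm.weight/exp_avg.pt"

# Load on CPU; the object is assumed to be a tensor of first-moment optimizer statistics.
state = torch.load(shard_path, map_location="cpu")
print(type(state), getattr(state, "shape", None))
```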
venv/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py ADDED
@@ -0,0 +1,37 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
16
+
17
+ from .config import AdaLoraConfig
18
+ from .gptq import SVDQuantLinear
19
+ from .layer import AdaLoraLayer, RankAllocator, SVDLinear
20
+ from .model import AdaLoraModel
21
+
22
+
23
+ __all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"]
24
+
25
+
26
+ def __getattr__(name):
27
+ if (name == "SVDLinear8bitLt") and is_bnb_available():
28
+ from .bnb import SVDLinear8bitLt
29
+
30
+ return SVDLinear8bitLt
31
+
32
+ if (name == "SVDLinear4bit") and is_bnb_4bit_available():
33
+ from .bnb import SVDLinear4bit
34
+
35
+ return SVDLinear4bit
36
+
37
+ raise AttributeError(f"module {__name__} has no attribute {name}")
venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (883 Bytes).
 
venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc ADDED
Binary file (3.18 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.4 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc ADDED
Binary file (1.62 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.3 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc ADDED
Binary file (9.75 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py ADDED
@@ -0,0 +1,145 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any
16
+
17
+ import torch
18
+
19
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
20
+
21
+ from .layer import AdaLoraLayer
22
+
23
+
24
+ if is_bnb_available():
25
+
26
+ class SVDLinear8bitLt(torch.nn.Module, AdaLoraLayer):
27
+ # Low-rank matrix for SVD-based adaptation
28
+ def __init__(
29
+ self,
30
+ base_layer: torch.nn.Module,
31
+ adapter_name: str,
32
+ r: int = 0,
33
+ lora_alpha: int = 1,
34
+ lora_dropout: float = 0.0,
35
+ init_lora_weights: bool = True,
36
+ **kwargs,
37
+ ) -> None:
38
+ super().__init__()
39
+ AdaLoraLayer.__init__(self, base_layer)
40
+ # Freezing the pre-trained weight matrix
41
+ self.get_base_layer().weight.requires_grad = False
42
+
43
+ self._active_adapter = adapter_name
44
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
45
+
46
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
47
+ # note: no check for self.merged because merging is not supported (yet)
48
+ result = self.base_layer(x)
49
+
50
+ if self.disable_adapters:
51
+ return result
52
+
53
+ for active_adapter in self.active_adapters:
54
+ if active_adapter not in self.lora_A.keys():
55
+ continue
56
+ requires_conversion = not torch.is_autocast_enabled()
57
+ if requires_conversion:
58
+ expected_dtype = result.dtype
59
+ if x.dtype != torch.float32:
60
+ x = x.float()
61
+
62
+ lora_A = self.lora_A[active_adapter]
63
+ lora_B = self.lora_B[active_adapter]
64
+ lora_E = self.lora_E[active_adapter]
65
+ dropout = self.lora_dropout[active_adapter]
66
+ scaling = self.scaling[active_adapter]
67
+ ranknum = self.ranknum[active_adapter] + 1e-5
68
+
69
+ output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
70
+ if requires_conversion:
71
+ output = output.to(expected_dtype)
72
+ output = output * scaling / ranknum
73
+ # inplace operation on view is forbidden for MatMul8bitLtBackward, so avoid it
74
+ result = result + output
75
+ return result
76
+
77
+ def __repr__(self) -> str:
78
+ rep = super().__repr__()
79
+ return "adalora." + rep
80
+
81
+
82
+ if is_bnb_4bit_available():
83
+
84
+ class SVDLinear4bit(torch.nn.Module, AdaLoraLayer):
85
+ # Low-rank matrix for SVD-based adaptation
86
+ def __init__(
87
+ self,
88
+ base_layer: torch.nn.Module,
89
+ adapter_name: str,
90
+ r: int = 0,
91
+ lora_alpha: int = 1,
92
+ lora_dropout: float = 0.0,
93
+ init_lora_weights: bool = True,
94
+ **kwargs,
95
+ ) -> None:
96
+ super().__init__()
97
+ AdaLoraLayer.__init__(self, base_layer)
98
+ # Freezing the pre-trained weight matrix
99
+ self.get_base_layer().weight.requires_grad = False
100
+
101
+ self._active_adapter = adapter_name
102
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
103
+
104
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
105
+ # note: no check for self.merged because merging is not supported (yet)
106
+ result = self.base_layer(x, *args, **kwargs)
107
+
108
+ if self.disable_adapters:
109
+ return result
110
+
111
+ # As per Tim Dettmers, for 4bit, we need to defensively clone here.
112
+ # The reason is that in some cases, an error can occur that backprop
113
+ # does not work on a manipulated view. This issue may be solved with
114
+ # newer PyTorch versions but this would need extensive testing to be
115
+ # sure.
116
+ result = result.clone()
117
+
118
+ for active_adapter in self.active_adapters:
119
+ if active_adapter not in self.lora_A.keys():
120
+ continue
121
+
122
+ lora_A = self.lora_A[active_adapter]
123
+ lora_B = self.lora_B[active_adapter]
124
+ lora_E = self.lora_E[active_adapter]
125
+ dropout = self.lora_dropout[active_adapter]
126
+ scaling = self.scaling[active_adapter]
127
+ ranknum = self.ranknum[active_adapter] + 1e-5
128
+
129
+ requires_conversion = not torch.is_autocast_enabled()
130
+ if requires_conversion:
131
+ expected_dtype = result.dtype
132
+ compute_dtype = lora_A.dtype
133
+ if x.dtype != compute_dtype:
134
+ x = x.to(compute_dtype)
135
+
136
+ output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
137
+ if requires_conversion:
138
+ output = output.to(expected_dtype)
139
+ output = output * scaling / ranknum
140
+ result += output
141
+ return result
142
+
143
+ def __repr__(self) -> str:
144
+ rep = super().__repr__()
145
+ return "adalora." + rep
venv/lib/python3.10/site-packages/peft/tuners/adalora/config.py ADDED
@@ -0,0 +1,52 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import Optional
17
+
18
+ from peft.tuners.lora import LoraConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class AdaLoraConfig(LoraConfig):
24
+ """
25
+ This is the configuration class to store the configuration of a [`~peft.AdaLora`].
26
+
27
+ Args:
28
+ target_r (`int`): The target average rank of incremental matrix.
29
+ init_r (`int`): The initial rank for each incremental matrix.
30
+ tinit (`int`): The steps of initial fine-tuning warmup.
31
+ tfinal (`int`): The step of final fine-tuning.
32
+ deltaT (`int`): The time interval between two budget allocations.
33
+ beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.
34
+ beta2 (`float`): The hyperparameter of EMA for uncertainty quantification.
35
+ orth_reg_weight (`float`): The coefficient of orthogonal regularization.
36
+ total_step (`int`): The total training steps that should be specified before training.
37
+ rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.
38
+ """
39
+
40
+ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."})
41
+ init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."})
42
+ tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."})
43
+ tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."})
44
+ deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."})
45
+ beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
46
+ beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
47
+ orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."})
48
+ total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."})
49
+ rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."})
50
+
51
+ def __post_init__(self):
52
+ self.peft_type = PeftType.ADALORA
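For context, a minimal sketch of how this configuration is typically consumed. `get_peft_model` and `AutoModelForSeq2SeqLM` come from `peft` and `transformers`; the model name and all hyperparameter values below are illustrative, not taken from this commit:

```python
from transformers import AutoModelForSeq2SeqLM
from peft import AdaLoraConfig, get_peft_model

base = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

config = AdaLoraConfig(
    task_type="SEQ_2_SEQ_LM",
    target_modules=["q", "v"],
    init_r=12,        # starting rank of every incremental matrix
    target_r=8,       # average rank budget after pruning
    tinit=200,        # steps of initial warmup before any pruning
    tfinal=500,       # steps of final fine-tuning with a frozen budget
    deltaT=10,        # steps between two budget re-allocations
    total_step=3000,  # training horizon needed by the budget scheduler
)

model = get_peft_model(base, config)
model.print_trainable_parameters()
```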
venv/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py ADDED
@@ -0,0 +1,72 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+
16
+ from .layer import AdaLoraLayer
17
+
18
+
19
+ class SVDQuantLinear(torch.nn.Module, AdaLoraLayer):
20
+ def __init__(
21
+ self,
22
+ base_layer,
23
+ adapter_name,
24
+ r: int = 0,
25
+ lora_alpha: int = 1,
26
+ lora_dropout: float = 0.0,
27
+ init_lora_weights: bool = True,
28
+ **kwargs,
29
+ ) -> None:
30
+ super().__init__()
31
+ AdaLoraLayer.__init__(self, base_layer)
32
+
33
+ # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
34
+ # for backwards compatibility
35
+ self.quant_linear_module = base_layer
36
+ self._active_adapter = adapter_name
37
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
38
+
39
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
40
+ result = self.quant_linear_module(x)
41
+
42
+ if self.disable_adapters:
43
+ return result
44
+
45
+ for active_adapter in self.active_adapters:
46
+ if active_adapter not in self.lora_A.keys():
47
+ continue
48
+ lora_A = self.lora_A[active_adapter]
49
+ lora_B = self.lora_B[active_adapter]
50
+ lora_E = self.lora_E[active_adapter]
51
+ dropout = self.lora_dropout[active_adapter]
52
+ scaling = self.scaling[active_adapter]
53
+ ranknum = self.ranknum[active_adapter] + 1e-5
54
+
55
+ requires_conversion = not torch.is_autocast_enabled()
56
+ if requires_conversion:
57
+ expected_dtype = result.dtype
58
+ if x.dtype != torch.float32:
59
+ x = x.float()
60
+
61
+ output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
62
+ # TODO: here, the dtype conversion is applied on the *whole expression*,
63
+ # not the intermediate result, unlike for SVDLinear8bitLT and
64
+ # SVDLinear4bit, is that correct?
65
+ if requires_conversion:
66
+ output = output.to(expected_dtype)
67
+ result += output
68
+ return result
69
+
70
+ def __repr__(self) -> str:
71
+ rep = super().__repr__()
72
+ return "adalora." + rep
venv/lib/python3.10/site-packages/peft/tuners/adalora/layer.py ADDED
@@ -0,0 +1,347 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from typing import Any, List, Optional
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+ from peft.tuners.lora import LoraLayer
22
+ from peft.tuners.tuners_utils import check_adapters_to_merge
23
+ from peft.utils import transpose
24
+
25
+
26
+ class AdaLoraLayer(LoraLayer):
27
+ # List all names of layers that may contain adapter weights
28
+ # Note: ranknum doesn't need to be included as it is not an nn.Module
29
+ adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B")
30
+ # other_param_names is defined in LoraLayer
31
+
32
+ def __init__(self, base_layer: nn.Module) -> None:
33
+ super().__init__(base_layer)
34
+ self.lora_E = nn.ParameterDict({})
35
+ self.lora_A = nn.ParameterDict({})
36
+ self.lora_B = nn.ParameterDict({})
37
+ self.ranknum = nn.ParameterDict({})
38
+
39
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
40
+ if r < 0:
41
+ # note: r == 0 is allowed for AdaLora, see #1539
42
+ raise ValueError(f"`r` should be a positive integer or 0, but the value passed is {r}")
43
+
44
+ self.r[adapter_name] = r
45
+ self.lora_alpha[adapter_name] = lora_alpha
46
+ if lora_dropout > 0.0:
47
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
48
+ else:
49
+ lora_dropout_layer = nn.Identity()
50
+
51
+ self.lora_dropout[adapter_name] = lora_dropout_layer
52
+ # Actual trainable parameters
53
+ # Right singular vectors
54
+ self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features))
55
+ # Singular values
56
+ self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1))
57
+ # Left singular vectors
58
+ self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r))
59
+ # The current rank
60
+ self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False)
61
+ self.ranknum[adapter_name].data.fill_(float(r))
62
+ self.ranknum[adapter_name].requires_grad = False
63
+ self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
64
+ if init_lora_weights:
65
+ self.reset_lora_parameters(adapter_name)
66
+
67
+ if hasattr(self.get_base_layer(), "qweight"):
68
+ # QuantLinear
69
+ self.to(self.get_base_layer().qweight.device)
70
+ else:
71
+ self.to(self.get_base_layer().weight.device)
72
+ self.set_adapter(self.active_adapters)
73
+
74
+ def reset_lora_parameters(self, adapter_name):
75
+ if adapter_name in self.lora_A.keys():
76
+ nn.init.normal_(self.lora_E[adapter_name], mean=0.0, std=0.02)
77
+ nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
78
+ nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
79
+
80
+
81
+ class SVDLinear(nn.Module, AdaLoraLayer):
82
+ # SVD-based adaptation by a dense layer
83
+ def __init__(
84
+ self,
85
+ base_layer: nn.Module,
86
+ adapter_name: str,
87
+ r: int = 0,
88
+ lora_alpha: int = 1,
89
+ lora_dropout: float = 0.0,
90
+ fan_in_fan_out: bool = False,
91
+ init_lora_weights: bool = True,
92
+ **kwargs,
93
+ ) -> None:
94
+ super().__init__()
95
+ AdaLoraLayer.__init__(self, base_layer)
96
+ # Freezing the pre-trained weight matrix
97
+ self.get_base_layer().weight.requires_grad = False
98
+
99
+ self.fan_in_fan_out = fan_in_fan_out
100
+ self._active_adapter = adapter_name
101
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
102
+
103
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
104
+ """
105
+ Merge the active adapter weights into the base weights
106
+
107
+ Args:
108
+ safe_merge (`bool`, *optional*):
109
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
110
+ before merging the weights. This is useful if you want to check if the merge operation will produce
111
+ NaNs. Defaults to `False`.
112
+ adapter_names (`List[str]`, *optional*):
113
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
114
+ to `None`.
115
+ """
116
+ adapter_names = check_adapters_to_merge(self, adapter_names)
117
+ if not adapter_names:
118
+ # no adapter to merge
119
+ return
120
+
121
+ for active_adapter in adapter_names:
122
+ base_layer = self.get_base_layer()
123
+ if active_adapter in self.lora_A.keys():
124
+ if safe_merge:
125
+ # Note that safe_merge will be slower than the normal merge
126
+ # because of the copy operation.
127
+ orig_weights = base_layer.weight.data.clone()
128
+ orig_weights += self.get_delta_weight(active_adapter)
129
+
130
+ if not torch.isfinite(orig_weights).all():
131
+ raise ValueError(
132
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
133
+ )
134
+
135
+ base_layer.weight.data = orig_weights
136
+ else:
137
+ base_layer.weight.data += self.get_delta_weight(active_adapter)
138
+ self.merged_adapters.append(active_adapter)
139
+
140
+ def unmerge(self) -> None:
141
+ """
142
+ This method unmerges all merged adapter layers from the base weights.
143
+ """
144
+ if not self.merged:
145
+ warnings.warn("Already unmerged. Nothing to do.")
146
+ return
147
+ while len(self.merged_adapters) > 0:
148
+ active_adapter = self.merged_adapters.pop()
149
+ if active_adapter in self.lora_A.keys():
150
+ self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
151
+
152
+ def get_delta_weight(self, adapter) -> torch.Tensor:
153
+ return (
154
+ transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out)
155
+ * self.scaling[adapter]
156
+ / (self.ranknum[adapter] + 1e-5)
157
+ )
158
+
159
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
160
+ if self.disable_adapters:
161
+ if self.merged:
162
+ self.unmerge()
163
+ result = self.base_layer(x, *args, **kwargs)
164
+ elif self.merged:
165
+ result = self.base_layer(x, *args, **kwargs)
166
+ else:
167
+ result = self.base_layer(x, *args, **kwargs)
168
+ for active_adapter in self.active_adapters:
169
+ if active_adapter not in self.lora_A.keys():
170
+ continue
171
+ lora_A = self.lora_A[active_adapter]
172
+ lora_B = self.lora_B[active_adapter]
173
+ lora_E = self.lora_E[active_adapter]
174
+ dropout = self.lora_dropout[active_adapter]
175
+ scaling = self.scaling[active_adapter]
176
+ ranknum = self.ranknum[active_adapter] + 1e-5
177
+
178
+ x = x.to(lora_A.dtype)
179
+ result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
180
+
181
+ return result
182
+
183
+ def __repr__(self) -> str:
184
+ rep = super().__repr__()
185
+ return "adalora." + rep
186
+
187
+
188
+ class RankAllocator:
189
+ """
190
+ The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY
191
+
192
+ Args:
193
+ config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
194
+ model: the model that we apply AdaLoRA to.
195
+
196
+ """
197
+
198
+ def __init__(self, model, peft_config, adapter_name):
199
+ self.peft_config = peft_config
200
+ self.adapter_name = adapter_name
201
+ self.beta1 = peft_config.beta1
202
+ self.beta2 = peft_config.beta2
203
+ assert self.beta1 > 0 and self.beta1 < 1
204
+ assert self.beta2 > 0 and self.beta2 < 1
205
+
206
+ self.reset_ipt()
207
+ self._set_budget_scheduler(model)
208
+
209
+ def set_total_step(self, total_step):
210
+ self.peft_config.total_step = total_step
211
+
212
+ def reset_ipt(self):
213
+ self.ipt = {}
214
+ self.exp_avg_ipt = {}
215
+ self.exp_avg_unc = {}
216
+
217
+ def _set_budget_scheduler(self, model):
218
+ self.init_bgt = 0
219
+ self.name_set = set()
220
+ for n, p in model.named_parameters():
221
+ if f"lora_A.{self.adapter_name}" in n:
222
+ self.init_bgt += p.size(0)
223
+ self.name_set.add(n.replace("lora_A", "%s"))
224
+ self.name_set = sorted(self.name_set)
225
+ # The total final rank budget
226
+ self.target_bgt = self.peft_config.target_r * len(self.name_set)
227
+
228
+ def budget_schedule(self, step: int):
229
+ tinit = self.peft_config.tinit
230
+ tfinal = self.peft_config.tfinal
231
+ total_step = self.peft_config.total_step
232
+ # Initial warmup
233
+ if step <= tinit:
234
+ budget = self.init_bgt
235
+ mask_ind = False
236
+ # Final fine-tuning
237
+ elif step > total_step - tfinal:
238
+ budget = self.target_bgt
239
+ mask_ind = True
240
+ else:
241
+ # Budget decreasing with a cubic scheduler
242
+ mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
243
+ budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt)
244
+ mask_ind = True if step % self.peft_config.deltaT == 0 else False
245
+ return budget, mask_ind
246
+
247
+ def update_ipt(self, model):
248
+ # Update the sensitivity and uncertainty for every weight
249
+ for n, p in model.named_parameters():
250
+ if "lora_" in n and self.adapter_name in n:
251
+ if n not in self.ipt:
252
+ self.ipt[n] = torch.zeros_like(p)
253
+ self.exp_avg_ipt[n] = torch.zeros_like(p)
254
+ self.exp_avg_unc[n] = torch.zeros_like(p)
255
+ with torch.no_grad():
256
+ self.ipt[n] = (p * p.grad).abs().detach()
257
+ # Sensitivity smoothing
258
+ self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n]
259
+ # Uncertainty quantification
260
+ self.exp_avg_unc[n] = (
261
+ self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs()
262
+ )
263
+
264
+ def _element_score(self, n):
265
+ return self.exp_avg_ipt[n] * self.exp_avg_unc[n]
266
+
267
+ def _combine_ipt(self, ipt_E, ipt_AB):
268
+ ipt_AB = ipt_AB.sum(dim=1, keepdim=False)
269
+ sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1)
270
+ return sum_ipt
271
+
272
+ def mask_to_budget(self, model, budget):
273
+ value_ipt = {}
274
+ vector_ipt = {}
275
+ triplet_ipt = {}
276
+ # Get the importance score for A, E, B
277
+ for n, p in model.named_parameters():
278
+ if f"lora_A.{self.adapter_name}" in n:
279
+ entry_ipt = self._element_score(n)
280
+ comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True)
281
+ name_m = n.replace("lora_A", "%s")
282
+ if name_m not in vector_ipt:
283
+ vector_ipt[name_m] = [comb_ipt]
284
+ else:
285
+ vector_ipt[name_m].append(comb_ipt)
286
+ if f"lora_B.{self.adapter_name}" in n:
287
+ entry_ipt = self._element_score(n)
288
+ comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1)
289
+ name_m = n.replace("lora_B", "%s")
290
+ if name_m not in vector_ipt:
291
+ vector_ipt[name_m] = [comb_ipt]
292
+ else:
293
+ vector_ipt[name_m].append(comb_ipt)
294
+ if f"lora_E.{self.adapter_name}" in n:
295
+ entry_ipt = self._element_score(n)
296
+ name_m = n.replace("lora_E", "%s")
297
+ value_ipt[name_m] = entry_ipt
298
+
299
+ all_score = []
300
+ # Calculate the score for each triplet
301
+ for name_m in vector_ipt:
302
+ ipt_E = value_ipt[name_m]
303
+ ipt_AB = torch.cat(vector_ipt[name_m], dim=1)
304
+ sum_ipt = self._combine_ipt(ipt_E, ipt_AB)
305
+ name_E = name_m % "lora_E"
306
+ triplet_ipt[name_E] = sum_ipt.view(-1, 1)
307
+ all_score.append(sum_ipt.view(-1))
308
+
309
+ # Get the threshold by ranking ipt
310
+ mask_threshold = torch.kthvalue(
311
+ torch.cat(all_score),
312
+ k=self.init_bgt - budget,
313
+ )[0].item()
314
+
315
+ rank_pattern = {}
316
+ # Mask the unimportant triplets
317
+ with torch.no_grad():
318
+ for n, p in model.named_parameters():
319
+ if f"lora_E.{self.adapter_name}" in n:
320
+ p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0)
321
+ rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist()
322
+ return rank_pattern
323
+
324
+ def update_and_allocate(self, model, global_step, force_mask=False):
325
+ # Update the importance score and allocate the budget
326
+ if global_step < self.peft_config.total_step - self.peft_config.tfinal:
327
+ self.update_ipt(model)
328
+ budget, mask_ind = self.budget_schedule(global_step)
329
+ # Allocate the budget according to importance scores
330
+ if mask_ind or force_mask:
331
+ rank_pattern = self.mask_to_budget(model, budget)
332
+ else:
333
+ rank_pattern = None
334
+ return budget, rank_pattern
335
+
336
+ def mask_using_rank_pattern(self, model, rank_pattern):
337
+ # Mask the unimportant triplets
338
+ is_adapter_name_truncated = False
339
+ if self.adapter_name not in next(iter(rank_pattern.keys())):
340
+ is_adapter_name_truncated = True
341
+
342
+ with torch.no_grad():
343
+ for n, p in model.named_parameters():
344
+ if f"lora_E.{self.adapter_name}" in n:
345
+ key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "")
346
+ mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device)
347
+ p.masked_fill_(~mask.bool(), 0.0)
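As a quick check of the cubic budget schedule implemented in `budget_schedule` above, here is the same formula pulled out as a standalone function; the budget sizes and step counts are made-up illustrative values:

```python
def cubic_budget(step, init_bgt=2304, target_bgt=1536, tinit=200, tfinal=500, total_step=3000):
    """Mirrors RankAllocator.budget_schedule: hold, cubic decay, then hold."""
    if step <= tinit:
        return init_bgt    # initial warmup: keep the full rank budget
    if step > total_step - tfinal:
        return target_bgt  # final fine-tuning: budget frozen at the target
    mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
    return int((init_bgt - target_bgt) * mul_coeff**3 + target_bgt)

for s in (0, 200, 1000, 2000, 2500, 3000):
    print(s, cubic_budget(s))  # budget shrinks smoothly from 2304 toward 1536
```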
venv/lib/python3.10/site-packages/peft/tuners/adalora/model.py ADDED
@@ -0,0 +1,346 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+
17
+ import torch
18
+ from transformers.pytorch_utils import Conv1D
19
+
20
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
21
+ from peft.tuners.lora import LoraConfig, LoraModel
22
+ from peft.tuners.tuners_utils import BaseTunerLayer
23
+ from peft.utils import (
24
+ TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
25
+ _freeze_adapter,
26
+ _get_submodules,
27
+ get_auto_gptq_quant_linear,
28
+ get_quantization_config,
29
+ )
30
+
31
+ from .gptq import SVDQuantLinear
32
+ from .layer import AdaLoraLayer, RankAllocator, SVDLinear
33
+
34
+
35
+ class AdaLoraModel(LoraModel):
36
+ """
37
+ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
38
+ https://openreview.net/forum?id=lq62uWRJjiY
39
+
40
+ Args:
41
+ model ([`transformers.PreTrainedModel`]): The model to be adapted.
42
+ config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
43
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
44
+
45
+ Returns:
46
+ `torch.nn.Module`: The AdaLora model.
47
+
48
+ Example::
49
+
50
+ >>> from transformers import AutoModelForSeq2SeqLM
+ >>> from peft import AdaLoraModel, AdaLoraConfig
51
+ >>> config = AdaLoraConfig(
52
+ peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
53
+ lora_dropout=0.01,
54
+ )
55
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+ >>> model = AdaLoraModel(model, config, "default")
56
+
57
+ **Attributes**:
58
+ - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
59
+ - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
60
+ """
61
+
62
+ # Note: don't redefine prefix here, it should be inherited from LoraModel
63
+
64
+ def __init__(self, model, config, adapter_name):
65
+ super().__init__(model, config, adapter_name)
66
+
67
+ traininable_mode_counter = 0
68
+ for config in self.peft_config.values():
69
+ if not config.inference_mode:
70
+ traininable_mode_counter += 1
71
+
72
+ if traininable_mode_counter > 1:
73
+ raise ValueError(
74
+ "AdaLoraModel supports only 1 trainable adapter. "
75
+ "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
76
+ )
77
+
78
+ if self.peft_config[adapter_name].inference_mode:
79
+ _freeze_adapter(self.model, adapter_name)
80
+ else:
81
+ self.trainable_adapter_name = adapter_name
82
+ self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
83
+
84
+ def _check_new_adapter_config(self, config: LoraConfig) -> None:
85
+ """
86
+ A helper method to check the config when a new adapter is being added.
87
+
88
+ Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
89
+
90
+ """
91
+ super()._check_new_adapter_config(config)
92
+
93
+ traininable_mode_counter = 0
94
+ for config_ in self.peft_config.values():
95
+ if not config_.inference_mode:
96
+ traininable_mode_counter += 1
97
+
98
+ if traininable_mode_counter > 1:
99
+ raise ValueError(
100
+ f"{self.__class__.__name__} supports only 1 trainable adapter. "
101
+ "When using multiple adapters, set inference_mode to True for all adapters except the one "
102
+ "you want to train."
103
+ )
104
+
105
+ def _create_and_replace(
106
+ self,
107
+ lora_config,
108
+ adapter_name,
109
+ target,
110
+ target_name,
111
+ parent,
112
+ current_key,
113
+ ):
114
+ kwargs = {
115
+ "r": lora_config.init_r,
116
+ "lora_alpha": lora_config.lora_alpha,
117
+ "lora_dropout": lora_config.lora_dropout,
118
+ "fan_in_fan_out": lora_config.fan_in_fan_out,
119
+ "init_lora_weights": lora_config.init_lora_weights,
120
+ "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
121
+ "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
122
+ }
123
+ if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available():
124
+ raise ImportError(
125
+ "To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. "
126
+ "You can install it with `pip install bitsandbytes`."
127
+ )
128
+
129
+ quantization_config = get_quantization_config(self.model, method="gptq")
130
+ if quantization_config is not None:
131
+ kwargs["gptq_quantization_config"] = quantization_config
132
+
133
+ # If it is not an AdaLoraLayer, create a new module, else update it with new adapters
134
+ if not isinstance(target, AdaLoraLayer):
135
+ new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
136
+ if adapter_name != self.active_adapter:
137
+ # adding an additional adapter: it is not automatically trainable
138
+ new_module.requires_grad_(False)
139
+ self._replace_module(parent, target_name, new_module, target)
140
+ else:
141
+ target.update_layer(
142
+ adapter_name,
143
+ lora_config.init_r,
144
+ lora_config.lora_alpha,
145
+ lora_config.lora_dropout,
146
+ lora_config.init_lora_weights,
147
+ )
148
+
149
+ @staticmethod
150
+ def _create_new_module(lora_config, adapter_name, target, **kwargs):
151
+ # avoid eager bnb import
152
+ if is_bnb_available():
153
+ import bitsandbytes as bnb
154
+
155
+ from .bnb import SVDLinear8bitLt
156
+ if is_bnb_4bit_available():
157
+ from .bnb import SVDLinear4bit
158
+
159
+ gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
160
+ AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
161
+
162
+ loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
163
+ loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
164
+
165
+ if isinstance(target, BaseTunerLayer):
166
+ target_base_layer = target.get_base_layer()
167
+ else:
168
+ target_base_layer = target
169
+
170
+ if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
171
+ kwargs.update(
172
+ {
173
+ "has_fp16_weights": target_base_layer.state.has_fp16_weights,
174
+ "memory_efficient_backward": target_base_layer.state.memory_efficient_backward,
175
+ "threshold": target_base_layer.state.threshold,
176
+ "index": target_base_layer.index,
177
+ }
178
+ )
179
+ new_module = SVDLinear8bitLt(target, adapter_name, **kwargs)
180
+ elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
181
+ fourbit_kwargs = kwargs.copy()
182
+ fourbit_kwargs.update(
183
+ {
184
+ "compute_dtype": target_base_layer.compute_dtype,
185
+ "compress_statistics": target_base_layer.weight.compress_statistics,
186
+ "quant_type": target_base_layer.weight.quant_type,
187
+ }
188
+ )
189
+ new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs)
190
+ elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear):
191
+ new_module = SVDQuantLinear(target, adapter_name, **kwargs)
192
+ else:
193
+ if isinstance(target_base_layer, torch.nn.Linear):
194
+ if kwargs["fan_in_fan_out"]:
195
+ warnings.warn(
196
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
197
+ "Setting fan_in_fan_out to False."
198
+ )
199
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
200
+ elif isinstance(target_base_layer, Conv1D):
201
+ if not kwargs["fan_in_fan_out"]:
202
+ warnings.warn(
203
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. "
204
+ "Setting fan_in_fan_out to True."
205
+ )
206
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
207
+ else:
208
+ raise ValueError(
209
+ f"Target module {target} is not supported. "
210
+ f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
211
+ )
212
+ new_module = SVDLinear(target, adapter_name, **kwargs)
213
+
214
+ return new_module
215
+
216
+ @staticmethod
217
+ def _prepare_adapter_config(peft_config, model_config):
218
+ if peft_config.target_modules is None:
219
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
220
+ raise ValueError("Please specify `target_modules` in `peft_config`")
221
+ peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
222
+ model_config["model_type"]
223
+ ]
224
+ return peft_config
225
+
226
+ def __getattr__(self, name: str):
227
+ """Forward missing attributes to the wrapped module."""
228
+ try:
229
+ return super().__getattr__(name) # defer to nn.Module's logic
230
+ except AttributeError:
231
+ return getattr(self.model, name)
232
+
233
+ def forward(self, *args, **kwargs):
234
+ outputs = self.model.forward(*args, **kwargs)
235
+
236
+ if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor):
237
+ # Calculate the orthogonal regularization
238
+ orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
239
+
240
+ if orth_reg_weight <= 0:
241
+ raise ValueError("orth_reg_weight should be greater than 0. ")
242
+
243
+ regu_loss = 0
244
+ num_param = 0
245
+ for n, p in self.model.named_parameters():
246
+ if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
247
+ para_cov = p @ p.T if "lora_A" in n else p.T @ p
248
+ I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) # noqa: E741
249
+ I.requires_grad = False
250
+ num_param += 1
251
+ regu_loss += torch.norm(para_cov - I, p="fro")
252
+ if num_param > 0:
253
+ regu_loss = regu_loss / num_param
254
+ else:
255
+ regu_loss = 0
256
+ outputs.loss += orth_reg_weight * regu_loss
257
+ return outputs
258
+
259
+ def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
260
+ lora_config = self.peft_config[adapter_name]
261
+ for name, rank_idx in rank_pattern.items():
262
+ if isinstance(rank_idx, list):
263
+ rank = sum(rank_idx)
264
+ elif isinstance(rank_idx, torch.Tensor):
265
+ rank_idx = rank_idx.view(-1)
266
+ rank = rank_idx.sum().item()
267
+ else:
268
+ raise ValueError("Unexpected type of rank_idx")
269
+ key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
270
+ _, target, _ = _get_submodules(self.model, key)
271
+ lora_E_weights = target.lora_E[adapter_name][rank_idx]
272
+ lora_A_weights = target.lora_A[adapter_name][rank_idx]
273
+ lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
274
+ ranknum = target.ranknum[adapter_name]
275
+ target.update_layer(
276
+ adapter_name,
277
+ rank,
278
+ lora_config.lora_alpha,
279
+ lora_config.lora_dropout,
280
+ lora_config.init_lora_weights,
281
+ )
282
+ with torch.no_grad():
283
+ if rank > 0:
284
+ target.lora_E[adapter_name].copy_(lora_E_weights)
285
+ target.lora_A[adapter_name].copy_(lora_A_weights)
286
+ target.lora_B[adapter_name].copy_(lora_B_weights)
287
+ # The scaling is exactly as the previous
288
+ target.ranknum[adapter_name].copy_(ranknum)
289
+
290
+ def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
291
+ for name, rank_idx in rank_pattern.items():
292
+ rank = sum(rank_idx)
293
+ prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
294
+ for layer in ["lora_E", "lora_A", "lora_B"]:
295
+ key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
296
+ if layer != "lora_B":
297
+ state_dict[key] = (
298
+ state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
299
+ )
300
+ else:
301
+ state_dict[key] = (
302
+ state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
303
+ )
304
+ return state_dict
305
+
306
+ def update_and_allocate(self, global_step):
307
+ """
308
+ This method updates Adalora budget and mask.
309
+
310
+ This should be called in every training step after `loss.backward()` and before `zero_grad()`.
311
+
312
+ `tinit`, `tfinal` and `deltaT` are handled within the method.
313
+
314
+ Args:
315
+ global_step (`int`): The current training step; it is used to calculate the AdaLora budget.
316
+
317
+ Example:
318
+
319
+ ```python
320
+ >>> loss = model(**input).loss
321
+ >>> loss.backward()
322
+ >>> optimizer.step()
323
+ >>> model.base_model.update_and_allocate(i_step)
324
+ >>> optimizer.zero_grad()
325
+ ```
326
+ """
327
+ lora_config = self.peft_config[self.trainable_adapter_name]
328
+ # Update the importance score and allocate the budget
329
+ if global_step < lora_config.total_step - lora_config.tfinal:
330
+ _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
331
+ if rank_pattern:
332
+ lora_config.rank_pattern = rank_pattern
333
+ # Finalize the budget allocation
334
+ elif global_step == lora_config.total_step - lora_config.tfinal:
335
+ _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
336
+ # for some reason, this freezes the trainable parameters and nothing gets updates
337
+ # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
338
+ lora_config.rank_pattern = rank_pattern
339
+ self.rankallocator.reset_ipt()
340
+ # Currently using inefficient way to mask the unimportant weights using the rank pattern
341
+ # due to problem mentioned above
342
+ elif global_step > lora_config.total_step - lora_config.tfinal:
343
+ self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
344
+ # Pass the function and do forward propagation
345
+ else:
346
+ return None
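The `update_and_allocate` docstring above fixes where the call sits in a training step; a hedged end-to-end sketch (the optimizer and dataloader are placeholders supplied by the caller):

```python
def train(model, optimizer, dataloader):
    # model: PEFT-wrapped model whose base_model is an AdaLoraModel.
    for step, batch in enumerate(dataloader):
        loss = model(**batch).loss
        loss.backward()
        optimizer.step()
        # Re-allocate the rank budget after the gradients have been used,
        # but before they are cleared, exactly as the docstring prescribes.
        model.base_model.update_and_allocate(step)
        optimizer.zero_grad()
```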
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from .config import AdaptionPromptConfig
15
+ from .layer import AdaptedAttention
16
+ from .model import AdaptionPromptModel
17
+
18
+
19
+ __all__ = ["AdaptionPromptConfig", "AdaptedAttention", "AdaptionPromptModel"]
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (377 Bytes).
 
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.11 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc ADDED
Binary file (3.28 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc ADDED
Binary file (5.56 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.55 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py ADDED
@@ -0,0 +1,80 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from collections import namedtuple
16
+ from dataclasses import dataclass, field
17
+
18
+ from peft.config import PeftConfig
19
+ from peft.utils import PeftType
20
+
21
+ from .utils import llama_compute_query_states
22
+
23
+
24
+ @dataclass
25
+ class AdaptionPromptConfig(PeftConfig):
26
+ """Stores the configuration of an [`AdaptionPromptModel`]."""
27
+
28
+ target_modules: str = field(
29
+ default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."}
30
+ )
31
+ adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"})
32
+ adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"})
33
+
34
+ def __post_init__(self):
35
+ self.peft_type = PeftType.ADAPTION_PROMPT
36
+
37
+ @property
38
+ def is_adaption_prompt(self) -> bool:
39
+ """Return True if this is an adaption prompt config."""
40
+ return True
41
+
42
+
43
+ # Contains the config that is specific to a transformers model type.
44
+ ModelTypeConfig = namedtuple(
45
+ "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"]
46
+ )
47
+
48
+ # Mapping of transformers model types to their specific configuration.
49
+ TRANSFORMERS_MODEL_CONFIG = {
50
+ "llama": ModelTypeConfig(
51
+ compute_query_states=llama_compute_query_states,
52
+ target_modules="self_attn",
53
+ k_proj_layer="k_proj",
54
+ v_proj_layer="v_proj",
55
+ o_proj_layer="o_proj",
56
+ ),
57
+ "mistral": ModelTypeConfig( # same as llama,
58
+ compute_query_states=llama_compute_query_states,
59
+ target_modules="self_attn",
60
+ k_proj_layer="k_proj",
61
+ v_proj_layer="v_proj",
62
+ o_proj_layer="o_proj",
63
+ ),
64
+ }
65
+
66
+
67
+ def prepare_config(
68
+ peft_config: AdaptionPromptConfig,
69
+ model,
70
+ ) -> AdaptionPromptConfig:
71
+ """Prepare the config based on the llama model type."""
72
+ if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
73
+ raise ValueError("Unsupported model type for adaption prompt: '{model.config.model_type}'.")
74
+
75
+ model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]
76
+
77
+ if peft_config.target_modules is None:
78
+ peft_config.target_modules = model_config.target_modules
79
+
80
+ return peft_config
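A minimal sketch of wiring this config into a causal LM via `get_peft_model`; the model name and the `adapter_len`/`adapter_layers` values are illustrative only:

```python
from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig, get_peft_model

# Any llama- or mistral-style model works, since prepare_config() above only knows those types.
base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")

config = AdaptionPromptConfig(
    task_type="CAUSAL_LM",
    adapter_len=10,     # number of adaption-prompt tokens per wrapped attention module
    adapter_layers=30,  # how many of the top layers get an AdaptedAttention wrapper
)

model = get_peft_model(base, config)
model.print_trainable_parameters()
```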
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py ADDED
@@ -0,0 +1,128 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+
21
+ from .config import TRANSFORMERS_MODEL_CONFIG
22
+
23
+
24
+ class AdaptedAttention(nn.Module):
25
+ """This module wraps a LLamaAttention module and injects adaption prompts."""
26
+
27
+ def __init__(self, model_type: str, adapter_len: int, model):
28
+ """
29
+ Initialize object.
30
+
31
+ Args:
32
+ model_type: The transformer model type. This is used to retrieve the right method to
33
+ compute query states.
34
+ adapter_len: The length of the adaption prompt to insert.
35
+ model: The original transformer attention module that is being wrapped.
36
+ """
37
+ assert not isinstance(model, AdaptedAttention)
38
+ super().__init__()
39
+ self.model_type = model_type
40
+ self.model = model
41
+ self.adapter_len = adapter_len
42
+ # Assume all parameters of the attention model we are wrapping are on the same device.
43
+ device = next(model.parameters()).device
44
+ # Don't think this was specified in the paper, but we follow the official repo which used an Embedding
45
+ # which initializes the tokens with standard normal values.
46
+ # https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L234
47
+ # (bsz, adapter_len, hidden_size)
48
+ target_dtype = (
49
+ model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32
50
+ )
51
+ self.adaption_prompt = nn.Parameter(
52
+ torch.empty(1, adapter_len, self.model.hidden_size, device=device, dtype=target_dtype).normal_()
53
+ )
54
+ # Initialize the gate to 0 as this is "zero-init".
55
+ self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype))
56
+
57
+ def forward(self, **kwargs):
58
+ """
59
+ Forward pass for the adapter which wraps the original LlamaAttention module.
60
+
61
+ "Official" paper implementation:
62
+ https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L141
63
+
64
+ Args:
65
+ kwargs: See the original LlamaAttention module.
66
+ """
67
+ if kwargs.get("output_attention", False):
68
+ raise NotImplementedError("output_attention is not currently supported.")
69
+
70
+ output, _, past_key_value = self.model(**kwargs)
71
+ bsz = output.shape[0]
72
+ q_len = output.shape[1]
73
+ embed_dim = output.shape[2]
74
+ k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer
75
+ v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer
76
+ o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer
77
+ factor = (
78
+ self.model.k_proj.in_features // self.model.k_proj.out_features
79
+ ) # Mistral has different input and output dimension for k_proj and v_proj layers
80
+
81
+ if k_proj_layer == v_proj_layer:
82
+ _, key, value = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2)
83
+ else:
84
+ key = getattr(self.model, k_proj_layer)(self.adaption_prompt)
85
+ value = getattr(self.model, v_proj_layer)(self.adaption_prompt)
86
+
87
+ # (bsz, num_key_value_heads, adapter_len, head_dim)
88
+ adapter_k = (
89
+ key.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim)
90
+ .repeat(bsz, 1, 1, 1)
91
+ .transpose(1, 2)
92
+ )
93
+ adapter_v = (
94
+ value.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim)
95
+ .repeat(bsz, 1, 1, 1)
96
+ .transpose(1, 2)
97
+ )
98
+ # Below is taken from https://github.com/huggingface/transformers/blob/e547458c43dfdbbb8f6a7757237e234c44e20a8f/src/transformers/models/mistral/modeling_mistral.py#L181
99
+ # (bsz, num_heads, adapter_len, head_dim)
100
+ adapter_k = torch.repeat_interleave(adapter_k, repeats=factor, dim=1)
101
+ adapter_v = torch.repeat_interleave(adapter_v, repeats=factor, dim=1)
102
+ # Recompute query states.
103
+ compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states
104
+ # (bsz, num_heads, q_len, head_dim)
105
+ query_states = compute_query_states(model=self.model, **kwargs)
106
+
107
+ previous_dtype = query_states.dtype
108
+
109
+ # (bsz, num_heads, q_len, adapter_len)
110
+ scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt(
111
+ self.model.head_dim
112
+ )
113
+ # Upcast attention to fp32
114
+ # (bsz, num_heads, q_len, adapter_len)
115
+ scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype)
116
+ # (bsz, q_len, num_heads * head_dim)
117
+ adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)
118
+
119
+ # (bsz, q_len, hidden_size)
120
+ if o_proj_layer is not None:
121
+ adapter_output = getattr(self.model, o_proj_layer)(adapter_output)
122
+
123
+ # Add adaption prompt output to original output.
124
+ output = output + adapter_output
125
+
126
+ # Restore original dtype.
127
+ output = output.to(previous_dtype)
128
+ return output, None, past_key_value
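
The heart of `AdaptedAttention.forward` is a gated prefix attention: the recomputed query states attend over `adapter_len` learned key/value tokens, and the result is scaled by a zero-initialized gate before being added to the base attention output. Below is a standalone sketch of that math on dummy tensors; the shapes are illustrative.

```python
import math

import torch
import torch.nn.functional as F

bsz, num_heads, q_len, adapter_len, head_dim = 2, 4, 5, 3, 8

query_states = torch.randn(bsz, num_heads, q_len, head_dim)
adapter_k = torch.randn(bsz, num_heads, adapter_len, head_dim)
adapter_v = torch.randn(bsz, num_heads, adapter_len, head_dim)
adaption_gate = torch.zeros(1)  # "zero-init", as in the module above

# (bsz, num_heads, q_len, adapter_len): attention of the queries over the adapter tokens
scores = torch.matmul(query_states, adapter_k.transpose(2, 3)) / math.sqrt(head_dim)
scores = adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(query_states.dtype)

# (bsz, q_len, num_heads * head_dim): the additive correction to the base attention output
adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)

# With the gate at its zero initialization the adapter contributes nothing, so the
# wrapped attention initially behaves exactly like the unwrapped one.
assert torch.allclose(adapter_output, torch.zeros_like(adapter_output))
```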
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py ADDED
@@ -0,0 +1,161 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict, List
16
+
17
+ import torch.nn as nn
18
+
19
+ from peft.utils import _freeze_adapter, _get_submodules
20
+
21
+ from .config import AdaptionPromptConfig, prepare_config
22
+ from .layer import AdaptedAttention
23
+ from .utils import is_adaption_prompt_trainable
24
+
25
+
26
+ class AdaptionPromptModel(nn.Module):
27
+ """
28
+ Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.
29
+
30
+ The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert
31
+ trainable prompts with gates (for zero init).
32
+
33
+ Notes on the multi-adapter pattern:
34
+ - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter
35
+ name.
36
+ - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them
37
+ in the dictionary, and replace them with the modules of the new adapter.
38
+ - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the
39
+ dictionary.
40
+ - Disabling the adapter would also result in the modules being removed from the model.
41
+ """
42
+
43
+ def __init__(self, model, configs: Dict, adapter_name: str):
44
+ super().__init__()
45
+ self.model = model
46
+ # Store adapter configs by name.
47
+ self.peft_config: Dict[str, AdaptionPromptConfig] = {}
48
+ # Store lists of the parents of the affected attention modules by adapter name.
49
+ # We keep references to the parents so we can swap the adapters in-and-out of the model.
50
+ self._parents: Dict[str, List[nn.Module]] = {}
51
+ # Store lists of cached AdaptedAttention modules by name.
52
+ self._cached_adapters: Dict[str, List] = {}
53
+ # The name of the currently active adapter.
54
+ self._active_adapter = None
55
+ # Whether the adapter is enabled.
56
+ self._enabled = True
57
+ self.forward = self.model.forward
58
+ self.add_adapter(adapter_name, configs[adapter_name])
59
+ self._mark_only_adaption_prompts_as_trainable(self.model)
60
+
61
+ def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:
62
+ """Add an adapter with the given name and config."""
63
+ config = prepare_config(config, self.model)
64
+ if adapter_name in self.peft_config:
65
+ raise ValueError(f"Adapter with name '{adapter_name}' already exists.")
66
+
67
+ parents = []
68
+ for name, _ in self.model.named_modules():
69
+ if name.endswith(config.target_modules):
70
+ par, _, _ = _get_submodules(self.model, name)
71
+ parents.append(par)
72
+ if len(parents) < config.adapter_layers:
73
+ raise ValueError(
74
+ f"Config specifies more adapter layers '{config.adapter_layers}'"
75
+ f" than the model has '{len(parents)}'."
76
+ )
77
+ # Note that if the target modules are not in Sequential, ModuleList, or
78
+ # some other PyTorch ordered container, the behavior is undefined as we
79
+ # assume here that the order of the modules is the same as the order of
80
+ # the transformer decoder layers.
81
+ parents = parents[-config.adapter_layers :]
82
+ self._parents[adapter_name] = parents
83
+
84
+ # It is only None during initialization.
85
+ # If it is disabled, we don't have to remove the modules.
86
+ if self._active_adapter is not None and self._enabled:
87
+ self._remove_adapted_attentions(self._active_adapter)
88
+ self._active_adapter = adapter_name
89
+ self.peft_config[adapter_name] = config
90
+ self._create_adapted_attentions(config, parents)
91
+ if not self._enabled:
92
+ self._remove_adapted_attentions(self._active_adapter)
93
+
94
+ if config.inference_mode:
95
+ _freeze_adapter(self.model, adapter_name)
96
+
97
+ def set_adapter(self, adapter_name: str) -> None:
98
+ """Set the model to use the adapter with the given name."""
99
+ if self._active_adapter == adapter_name:
100
+ return
101
+ if adapter_name not in self.peft_config:
102
+ raise ValueError(f"Adapter with name '{adapter_name}' does not exist.")
103
+
104
+ if self._enabled:
105
+ self._remove_adapted_attentions(self._active_adapter)
106
+ self._set_adapted_attentions(adapter_name)
107
+
108
+ self._active_adapter = adapter_name
109
+
110
+ def enable_adapter_layers(self):
111
+ """Enable adapter layers by swapping in cached AdaptedAttention modules."""
112
+ self._enabled = True
113
+ self._set_adapted_attentions(self._active_adapter)
114
+
115
+ def disable_adapter_layers(self):
116
+ """Disable adapter layers by swapping out AdaptedAttention modules."""
117
+ self._enabled = False
118
+ self._remove_adapted_attentions(self._active_adapter)
119
+
120
+ def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:
121
+ """Wrap LlamaAttention modules with newly created AdaptedAttention modules."""
122
+ for par in parents:
123
+ attn = AdaptedAttention(
124
+ model_type=self.model.config.model_type,
125
+ adapter_len=config.adapter_len,
126
+ model=getattr(par, config.target_modules),
127
+ )
128
+ setattr(par, config.target_modules, attn)
129
+
130
+ def _set_adapted_attentions(self, adapter_name: str) -> None:
131
+ """Replace LlamaAttention modules with cached AdaptedAttention modules."""
132
+ cached = self._cached_adapters[adapter_name]
133
+ del self._cached_adapters[adapter_name]
134
+ config = self.peft_config[adapter_name]
135
+ for i, par in enumerate(self._parents[adapter_name]):
136
+ setattr(par, config.target_modules, cached[i])
137
+
138
+ def _remove_adapted_attentions(self, adapter_name: str) -> None:
139
+ """Remove AdaptedAttention modules from the model and store them in the cache."""
140
+ config = self.peft_config[adapter_name]
141
+ adapted_attentions = []
142
+ for par in self._parents[adapter_name]:
143
+ attn = getattr(par, config.target_modules)
144
+ adapted_attentions.append(attn)
145
+ setattr(par, config.target_modules, attn.model)
146
+ self._cached_adapters[adapter_name] = adapted_attentions
147
+
148
+ def _mark_only_adaption_prompts_as_trainable(self, model: nn.Module) -> None:
149
+ """Freeze all parameters of the model except the adaption prompts."""
150
+ for n, p in model.named_parameters():
151
+ if not is_adaption_prompt_trainable(n):
152
+ p.requires_grad = False
153
+
154
+ def __getattr__(self, name: str):
155
+ """Forward missing attributes to the wrapped module."""
156
+ try:
157
+ return super().__getattr__(name) # defer to nn.Module's logic
158
+ except AttributeError:
159
+ # This is necessary as e.g. causal models have various methods that we
160
+ # don't want to re-implement here.
161
+ return getattr(self.model, name)
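
A hedged sketch of how this tuner is usually reached through the public PEFT entry points, exercising the add/switch path described in the class docstring; the checkpoint name and hyperparameters below are illustrative, not taken from this commit.

```python
from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # any llama/mistral-type model

config_a = AdaptionPromptConfig(adapter_len=10, adapter_layers=30, task_type="CAUSAL_LM")
config_b = AdaptionPromptConfig(adapter_len=4, adapter_layers=8, task_type="CAUSAL_LM")

model = get_peft_model(base, config_a, adapter_name="long_prompt")
model.add_adapter("short_prompt", config_b)

# Only one adapter's AdaptedAttention modules live inside the model at a time;
# the inactive adapter's modules wait in the cache until set_adapter swaps them back in.
model.set_adapter("short_prompt")
```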
venv/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+
19
+
20
+ def llama_rotate_half(x: torch.Tensor) -> torch.Tensor:
21
+ """
22
+ Rotate half the hidden dims of the input.
23
+
24
+ This function was duplicated verbatim from:
25
+ https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126
26
+
27
+ This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other
28
+ functions were also adapted from the transformers implementation but were modified.
29
+ """
30
+ x1 = x[..., : x.shape[-1] // 2]
31
+ x2 = x[..., x.shape[-1] // 2 :]
32
+ return torch.cat((-x2, x1), dim=-1)
33
+
34
+
35
+ def llama_apply_rotary_pos_emb(q, cos, sin, position_ids):
36
+ """
37
+ Apply rotary position embedding to query states in the Llama model.
38
+
39
+ This function was adapted from:
40
+ https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133
41
+
42
+ It was modified to remove unnecessary processing of key states. The method is compatible with transformers <=
43
+ 4.34.2 and also with the latest version (>=4.35).
44
+ """
45
+ # In previous transformers versions, the cached cos/sin had a 4D shape
46
+ if len(cos.shape) == 4:
47
+ gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
48
+ gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
49
+ cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
50
+ sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
51
+ # In the new version, it is 2D so we fall back to the new implementation
52
+ # https://github.com/huggingface/transformers/blame/eef7ea98c31a333bacdc7ae7a2372bde772be8e4/src/transformers/models/llama/modeling_llama.py#L222-L226
53
+ else:
54
+ cos = cos[position_ids].unsqueeze(1)
55
+ sin = sin[position_ids].unsqueeze(1)
56
+ q_embed = (q * cos) + (llama_rotate_half(q) * sin)
57
+ return q_embed
58
+
59
+
60
+ def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor:
61
+ """
62
+ Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the
63
+ original LlamaModel in the transformers library does not return them. See the related discussion in the PR:
64
+ https://github.com/huggingface/peft/pull/268
65
+ """
66
+ hidden_states = kwargs.get("hidden_states")
67
+ position_ids = kwargs.get("position_ids")
68
+ past_key_value = kwargs.get("past_key_value")
69
+ bsz, q_len, _ = hidden_states.size()
70
+ query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2)
71
+
72
+ factor = model.k_proj.in_features // model.k_proj.out_features
73
+ value_states = (
74
+ model.v_proj(hidden_states).view(bsz, q_len, (model.num_heads // factor), model.head_dim).transpose(1, 2)
75
+ )
76
+
77
+ seq_len = q_len
78
+
79
+ if past_key_value is not None:
80
+ if isinstance(past_key_value, tuple):
81
+ # for transformers <= 4.35
82
+ seq_len += past_key_value[0].shape[-2]
83
+ else:
84
+ # since transformers 4.36, this is a DynamicCache instance
85
+ seq_len += past_key_value.get_seq_length(model.layer_idx)
86
+
87
+ # For transformers > 4.37.2 `position_ids` became a required argument in the rotary embedding's forward pass.
88
+ if "position_ids" not in inspect.signature(model.rotary_emb.forward).parameters:
89
+ # TODO we assume that position_ids is not None here, not sure if that is safe but the old code also did that
90
+ cos, sin = model.rotary_emb(value_states, seq_len=seq_len)
91
+ return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids)
92
+
93
+ past_seen_tokens = 0
94
+ if position_ids is None:
95
+ # Compute position_ids, since they are required for transformers > 4.37.2
96
+ if past_key_value is None:
97
+ new_cache_positions = torch.arange(q_len, q_len + q_len, device=value_states.device)
98
+ else:
99
+ past_seen_tokens = past_key_value.get_usable_length(q_len, model.layer_idx)
100
+ new_cache_positions = torch.arange(past_seen_tokens, past_seen_tokens + q_len, device=value_states.device)
101
+ position_ids = new_cache_positions.unsqueeze(0)
102
+
103
+ rotary_emb_kwargs = {"position_ids": position_ids}
104
+ # The `seq_len` argument has been officially removed in transformers >= 4.39.0
105
+ if "seq_len" in inspect.signature(model.rotary_emb.forward).parameters:
106
+ rotary_emb_kwargs["seq_len"] = q_len + past_seen_tokens
107
+
108
+ cos, sin = model.rotary_emb(value_states, **rotary_emb_kwargs)
109
+
110
+ # For batched inference unsqueeze it on the correct dim
111
+ # since: https://github.com/huggingface/transformers/pull/29109
112
+ if len(cos.shape) == 3:
113
+ cos = cos.unsqueeze(1)
114
+ sin = sin.unsqueeze(1)
115
+
116
+ return (query_states * cos) + (llama_rotate_half(query_states) * sin)
117
+
118
+
119
+ def is_adaption_prompt_trainable(params: str) -> bool:
120
+ """Return True if module is trainable under adaption prompt fine-tuning."""
121
+ return params.split(".")[-1].startswith("adaption_")
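
A small numeric sketch of the rotary-embedding helpers above: `llama_rotate_half` swaps the two halves of the last dimension and negates the second half, so `q * cos + rotate_half(q) * sin` rotates each `(x_i, x_{i+d/2})` pair by the embedding angle. The values below are illustrative.

```python
import torch


def rotate_half(x: torch.Tensor) -> torch.Tensor:
    # Same operation as llama_rotate_half above.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


q = torch.tensor([1.0, 2.0, 3.0, 4.0])
print(rotate_half(q))  # tensor([-3., -4.,  1.,  2.])

# For a single position with angle theta, RoPE applies a 2D rotation to each pair.
theta = torch.tensor(0.5)
q_embed = q * torch.cos(theta) + rotate_half(q) * torch.sin(theta)
```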
venv/lib/python3.10/site-packages/peft/tuners/loha/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import LoHaConfig
16
+ from .layer import Conv2d, Linear, LoHaLayer
17
+ from .model import LoHaModel
18
+
19
+
20
+ __all__ = ["LoHaConfig", "LoHaModel", "Conv2d", "Linear", "LoHaLayer"]
venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (383 Bytes).
 
venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc ADDED
Binary file (5.36 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.1 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc ADDED
Binary file (3.9 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/loha/config.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import List, Optional, Union
17
+
18
+ from peft.tuners.lycoris_utils import LycorisConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class LoHaConfig(LycorisConfig):
24
+ """
25
+ This is the configuration class to store the configuration of a [`LoHaModel`].
26
+
27
+ Args:
28
+ r (`int`):
29
+ LoHa rank.
30
+ alpha (`int`):
31
+ The alpha parameter for LoHa scaling.
32
+ rank_dropout (`float`):
33
+ The dropout probability for rank dimension during training.
34
+ module_dropout (`float`):
35
+ The dropout probability for disabling LoHa modules during training.
36
+ use_effective_conv2d (`bool`):
37
+ Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
38
+ target_modules (`Optional[Union[List[str], str]]`):
39
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
40
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
41
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
42
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
43
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
44
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
45
+ the target modules manually.
46
+ init_weights (`bool`):
47
+ Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
48
+ discouraged.
49
+ layers_to_transform (`Union[List[int], int]`):
50
+ The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
51
+ that are specified in this list. If a single integer is passed, it will apply the transformations on the
52
+ layer at this index.
53
+ layers_pattern (`str`):
54
+ The layer pattern name, used only if `layers_to_transform` is different from `None`.
55
+ rank_pattern (`dict`):
56
+ The mapping from layer names or regexp expression to ranks which are different from the default rank
57
+ specified by `r`.
58
+ alpha_pattern (`dict`):
59
+ The mapping from layer names or regexp expression to alphas which are different from the default alpha
60
+ specified by `alpha`.
61
+ modules_to_save (`Optional[List[str]]`):
62
+ List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
63
+ """
64
+
65
+ r: int = field(default=8, metadata={"help": "LoHa rank"})
66
+ alpha: int = field(default=8, metadata={"help": "LoHa alpha"})
67
+ rank_dropout: float = field(
68
+ default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
69
+ )
70
+ module_dropout: float = field(
71
+ default=0.0, metadata={"help": "The dropout probability for disabling LoHa modules during training"}
72
+ )
73
+ use_effective_conv2d: bool = field(
74
+ default=False,
75
+ metadata={
76
+ "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
77
+ },
78
+ )
79
+ target_modules: Optional[Union[List[str], str]] = field(
80
+ default=None,
81
+ metadata={
82
+ "help": "List of module names or regex expression of the module names to replace with LoHa."
83
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
84
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
85
+ },
86
+ )
87
+ init_weights: bool = field(
88
+ default=True,
89
+ metadata={
90
+ "help": (
91
+ "Whether to initialize the weights of the LoHa layers with their default initialization. Don't change "
92
+ "this setting, except if you know exactly what you're doing."
93
+ ),
94
+ },
95
+ )
96
+ layers_to_transform: Optional[Union[List[int], int]] = field(
97
+ default=None,
98
+ metadata={
99
+ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
100
+ },
101
+ )
102
+ layers_pattern: Optional[str] = field(
103
+ default=None,
104
+ metadata={
105
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
106
+ },
107
+ )
108
+ modules_to_save: Optional[List[str]] = field(
109
+ default=None,
110
+ metadata={
111
+ "help": "List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. "
112
+ "For example, in Sequence Classification or Token Classification tasks, "
113
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
114
+ },
115
+ )
116
+
117
+ def __post_init__(self):
118
+ self.peft_type = PeftType.LOHA
119
+ self.target_modules = (
120
+ set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
121
+ )
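
A minimal, hedged example of constructing this config; the target module names and the `rank_pattern` key below are illustrative and depend on the architecture being adapted.

```python
from peft import LoHaConfig

config = LoHaConfig(
    r=8,
    alpha=16,
    target_modules=["q_proj", "v_proj"],  # exact or suffix matches against module names
    rank_dropout=0.0,
    module_dropout=0.0,
    rank_pattern={"q_proj": 4},           # per-module override of the default rank r
)
```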
venv/lib/python3.10/site-packages/peft/tuners/loha/layer.py ADDED
@@ -0,0 +1,375 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Any, Set, Tuple
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from peft.tuners.lycoris_utils import LycorisLayer
23
+
24
+
25
+ class LoHaLayer(nn.Module, LycorisLayer):
26
+ # All names of layers that may contain adapter weights
27
+ adapter_layer_names = ("hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b", "hada_t1", "hada_t2")
28
+ # other_param_names is defined on parent class
29
+
30
+ def __init__(self, base_layer: nn.Module):
31
+ super().__init__()
32
+ LycorisLayer.__init__(self, base_layer)
33
+
34
+ # LoHa info
35
+ self.hada_w1_a = nn.ParameterDict({})
36
+ self.hada_w1_b = nn.ParameterDict({})
37
+ self.hada_w2_a = nn.ParameterDict({})
38
+ self.hada_w2_b = nn.ParameterDict({})
39
+ self.hada_t1 = nn.ParameterDict({})
40
+ self.hada_t2 = nn.ParameterDict({})
41
+
42
+ @property
43
+ def _available_adapters(self) -> Set[str]:
44
+ return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2}
45
+
46
+ def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]):
47
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L130C9-L143C75
48
+ if len(shape) == 4:
49
+ self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
50
+ self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode
51
+ self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode
52
+
53
+ self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
54
+ self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode
55
+ self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode
56
+ else:
57
+ self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
58
+ self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
59
+
60
+ self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
61
+ self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
62
+
63
+ def reset_adapter_parameters(self, adapter_name: str):
64
+ # Original implementation performs initialization with normal distribution
65
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
66
+
67
+ # FedPara paper proposes to perform He initialization, let's stick with it
68
+ # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization
69
+ if adapter_name in self.hada_w1_a.keys():
70
+ nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
71
+ nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
72
+ nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
73
+ nn.init.zeros_(self.hada_w2_b[adapter_name])
74
+ if adapter_name in self.hada_t1.keys():
75
+ nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
76
+ nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
77
+
78
+ def reset_adapter_parameters_random(self, adapter_name: str):
79
+ # Original implementation performs initialization with normal distribution
80
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
81
+
82
+ # FedPara paper proposes to perform He initialization, let's stick with it
83
+ # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization
84
+ if adapter_name in self.hada_w1_a.keys():
85
+ nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
86
+ nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
87
+ nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
88
+ nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5))
89
+ if adapter_name in self.hada_t1.keys():
90
+ nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
91
+ nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
92
+
93
+ def update_layer(
94
+ self,
95
+ adapter_name: str,
96
+ r: int,
97
+ alpha: float,
98
+ rank_dropout: float,
99
+ module_dropout: float,
100
+ init_weights: bool,
101
+ use_effective_conv2d: bool = False,
102
+ **kwargs,
103
+ ) -> None:
104
+ """Internal function to create loha adapter
105
+
106
+ Args:
107
+ adapter_name (`str`): Name for the adapter to add.
108
+ r (`int`): Rank for the added adapter.
109
+ alpha (`float`): Alpha for the added adapter.
110
+ rank_dropout (`float`): The dropout probability for rank dimension during training.
111
+ module_dropout (`float`): The dropout probability for disabling adapter during training.
112
+ init_weights (`bool`): Whether to initialize weights.
113
+ use_effective_conv2d (`bool`, *optional*, defaults to `False`):
114
+ Use parameter effective decomposition for Conv2d with ksize > 1.
115
+ """
116
+ if r <= 0:
117
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
118
+
119
+ self.r[adapter_name] = r
120
+ self.alpha[adapter_name] = alpha
121
+ self.scaling[adapter_name] = alpha / r
122
+ self.rank_dropout[adapter_name] = rank_dropout
123
+ self.module_dropout[adapter_name] = module_dropout
124
+
125
+ # Determine shape of LoHa weights
126
+ base_layer = self.get_base_layer()
127
+ if isinstance(base_layer, nn.Linear):
128
+ shape = tuple(base_layer.weight.shape)
129
+ elif isinstance(base_layer, nn.Conv2d):
130
+ use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
131
+ if use_effective_conv2d:
132
+ shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size)
133
+ else:
134
+ shape = (
135
+ base_layer.out_channels,
136
+ base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1],
137
+ )
138
+ else:
139
+ raise TypeError(f"LoHa is not implemented for base layers of type {type(base_layer).__name__}")
140
+
141
+ # Create weights with provided shape
142
+ self.create_adapter_parameters(adapter_name, r, shape)
143
+
144
+ # Initialize weights
145
+ if init_weights:
146
+ self.reset_adapter_parameters(adapter_name)
147
+ else:
148
+ self.reset_adapter_parameters_random(adapter_name)
149
+
150
+ # Move new weights to device
151
+ weight = getattr(self.get_base_layer(), "weight", None)
152
+ if weight is not None:
153
+ # the layer is already completely initialized, this is an update
154
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
155
+ self.to(weight.device, dtype=weight.dtype)
156
+ else:
157
+ self.to(weight.device)
158
+ self.set_adapter(self.active_adapters)
159
+
160
+ def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
161
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L178
162
+ if adapter_name in self.hada_t1.keys():
163
+ weight = make_weight_cp(
164
+ self.hada_t1[adapter_name],
165
+ self.hada_w1_a[adapter_name],
166
+ self.hada_w1_b[adapter_name],
167
+ self.hada_t2[adapter_name],
168
+ self.hada_w2_a[adapter_name],
169
+ self.hada_w2_b[adapter_name],
170
+ scale=torch.tensor(self.scaling[adapter_name]),
171
+ )
172
+ else:
173
+ weight = make_weight(
174
+ self.hada_w1_a[adapter_name],
175
+ self.hada_w1_b[adapter_name],
176
+ self.hada_w2_a[adapter_name],
177
+ self.hada_w2_b[adapter_name],
178
+ scale=torch.tensor(self.scaling[adapter_name]),
179
+ )
180
+
181
+ base_layer = self.get_base_layer()
182
+ weight = weight.reshape(base_layer.weight.shape)
183
+
184
+ # Perform rank dropout during training - drop rows of addition weights
185
+ rank_dropout = self.rank_dropout[adapter_name]
186
+ if self.training and rank_dropout:
187
+ drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype)
188
+ drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
189
+ # TODO: Investigate if there should be a scaler like in normal dropout during training
190
+ # Original implementation doesn't have it
191
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L193
192
+ drop /= drop.mean()
193
+ weight *= drop
194
+
195
+ return weight
196
+
197
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
198
+ previous_dtype = x.dtype
199
+
200
+ if self.disable_adapters:
201
+ if self.merged:
202
+ self.unmerge()
203
+ result = self.base_layer(x, *args, **kwargs)
204
+ elif self.merged:
205
+ result = self.base_layer(x, *args, **kwargs)
206
+ else:
207
+ result = self.base_layer(x, *args, **kwargs)
208
+
209
+ # Execute all the adapters
210
+ for active_adapter in self.active_adapters:
211
+ if active_adapter not in self._available_adapters:
212
+ continue
213
+
214
+ module_dropout = self.module_dropout[active_adapter]
215
+
216
+ # Modify current execution weights
217
+ if (not self.training) or (self.training and torch.rand(1) > module_dropout):
218
+ result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
219
+
220
+ result = result.to(previous_dtype)
221
+ return result
222
+
223
+
224
+ class Linear(LoHaLayer):
225
+ """LoHa implemented in Linear layer"""
226
+
227
+ def __init__(
228
+ self,
229
+ base_layer: nn.Module,
230
+ adapter_name: str = "default",
231
+ r: int = 0,
232
+ alpha: float = 0.0,
233
+ rank_dropout: float = 0.0,
234
+ module_dropout: float = 0.0,
235
+ init_weights: bool = True,
236
+ **kwargs,
237
+ ):
238
+ super().__init__(base_layer)
239
+
240
+ # Create adapter and set it active
241
+ self._active_adapter = adapter_name
242
+ self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
243
+
244
+ def _get_delta_activations(
245
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
246
+ ) -> torch.Tensor:
247
+ delta_weight = self.get_delta_weight(adapter_name)
248
+ # don't add bias here, because the bias is already included in the output of the base_layer
249
+ return F.linear(input, delta_weight)
250
+
251
+ def __repr__(self) -> str:
252
+ rep = super().__repr__()
253
+ return "loha." + rep
254
+
255
+
256
+ class Conv2d(LoHaLayer):
257
+ """LoHa implemented in Conv2d layer"""
258
+
259
+ def __init__(
260
+ self,
261
+ base_layer: nn.Module,
262
+ adapter_name: str = "default",
263
+ r: int = 0,
264
+ alpha: float = 0.0,
265
+ rank_dropout: float = 0.0,
266
+ module_dropout: float = 0.0,
267
+ use_effective_conv2d: bool = False,
268
+ init_weights: bool = True,
269
+ **kwargs,
270
+ ):
271
+ super().__init__(base_layer)
272
+
273
+ # Create adapter and set it active
274
+ self._active_adapter = adapter_name
275
+ self.update_layer(
276
+ adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
277
+ )
278
+
279
+ def _get_delta_activations(
280
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
281
+ ) -> torch.Tensor:
282
+ delta_weight = self.get_delta_weight(adapter_name)
283
+ # don't add bias here, because the bias is already included in the output of the base_layer
284
+ base_layer = self.get_base_layer()
285
+ return F.conv2d(
286
+ input,
287
+ delta_weight,
288
+ stride=base_layer.stride,
289
+ padding=base_layer.padding,
290
+ dilation=base_layer.dilation,
291
+ groups=base_layer.groups,
292
+ )
293
+
294
+ def __repr__(self) -> str:
295
+ rep = super().__repr__()
296
+ return "loha." + rep
297
+
298
+
299
+ # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L9
300
+
301
+
302
+ class HadaWeight(torch.autograd.Function):
303
+ @staticmethod
304
+ def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)):
305
+ ctx.save_for_backward(w1a, w1b, w2a, w2b, scale)
306
+ diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale
307
+ return diff_weight
308
+
309
+ @staticmethod
310
+ def backward(ctx, grad_out):
311
+ (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors
312
+ grad_out = grad_out * scale
313
+ temp = grad_out * (w2a @ w2b)
314
+ grad_w1a = temp @ w1b.T
315
+ grad_w1b = w1a.T @ temp
316
+
317
+ temp = grad_out * (w1a @ w1b)
318
+ grad_w2a = temp @ w2b.T
319
+ grad_w2b = w2a.T @ temp
320
+
321
+ del temp
322
+ return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None
323
+
324
+
325
+ class HadaWeightCP(torch.autograd.Function):
326
+ @staticmethod
327
+ def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)):
328
+ ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale)
329
+
330
+ rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a)
331
+ rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a)
332
+
333
+ return rebuild1 * rebuild2 * scale
334
+
335
+ @staticmethod
336
+ def backward(ctx, grad_out):
337
+ (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors
338
+ grad_out = grad_out * scale
339
+
340
+ temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b)
341
+ rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a)
342
+
343
+ grad_w = rebuild * grad_out
344
+ del rebuild
345
+
346
+ grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
347
+ grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T)
348
+ del grad_w, temp
349
+
350
+ grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp)
351
+ grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T)
352
+ del grad_temp
353
+
354
+ temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b)
355
+ rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a)
356
+
357
+ grad_w = rebuild * grad_out
358
+ del rebuild
359
+
360
+ grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
361
+ grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T)
362
+ del grad_w, temp
363
+
364
+ grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp)
365
+ grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T)
366
+ del grad_temp
367
+ return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None
368
+
369
+
370
+ def make_weight(w1a, w1b, w2a, w2b, scale):
371
+ return HadaWeight.apply(w1a, w1b, w2a, w2b, scale)
372
+
373
+
374
+ def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale):
375
+ return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
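
The delta weight built by `HadaWeight`/`make_weight` above is an element-wise (Hadamard) product of two independent low-rank factorizations, so the effective update can reach rank up to `r * r` while storing only `2 * r * (out_dim + in_dim)` parameters. A standalone sketch with illustrative shapes:

```python
import torch

out_dim, in_dim, r = 6, 4, 2
w1a, w1b = torch.randn(out_dim, r), torch.randn(r, in_dim)
w2a, w2b = torch.randn(out_dim, r), torch.randn(r, in_dim)
scale = torch.tensor(1.0)

# Same math as HadaWeight.forward above.
delta_w = (w1a @ w1b) * (w2a @ w2b) * scale
assert delta_w.shape == (out_dim, in_dim)
print(torch.linalg.matrix_rank(delta_w))  # typically r * r = 4 for random factors
```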
venv/lib/python3.10/site-packages/peft/tuners/loha/model.py ADDED
@@ -0,0 +1,114 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ from itertools import chain
17
+ from typing import Dict, Type, Union
18
+
19
+ import torch
20
+ from torch import nn
21
+
22
+ from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
23
+
24
+ from .layer import Conv2d, Linear, LoHaLayer
25
+
26
+
27
+ class LoHaModel(LycorisTuner):
28
+ """
29
+ Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in
30
+ https://arxiv.org/abs/2108.06098 Current implementation heavily borrows from
31
+ https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py
32
+
33
+ Args:
34
+ model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
35
+ config ([`LoHaConfig`]): The configuration of the LoHa model.
36
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
37
+
38
+ Returns:
39
+ `torch.nn.Module`: The LoHa model.
40
+
41
+ Example:
42
+ ```py
43
+ >>> from diffusers import StableDiffusionPipeline
44
+ >>> from peft import LoHaModel, LoHaConfig
45
+
46
+ >>> config_te = LoHaConfig(
47
+ ... r=8,
48
+ ... alpha=32,
49
+ ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
50
+ ... rank_dropout=0.0,
51
+ ... module_dropout=0.0,
52
+ ... init_weights=True,
53
+ ... )
54
+ >>> config_unet = LoHaConfig(
55
+ ... r=8,
56
+ ... alpha=32,
57
+ ... target_modules=[
58
+ ... "proj_in",
59
+ ... "proj_out",
60
+ ... "to_k",
61
+ ... "to_q",
62
+ ... "to_v",
63
+ ... "to_out.0",
64
+ ... "ff.net.0.proj",
65
+ ... "ff.net.2",
66
+ ... ],
67
+ ... rank_dropout=0.0,
68
+ ... module_dropout=0.0,
69
+ ... init_weights=True,
70
+ ... use_effective_conv2d=True,
71
+ ... )
72
+
73
+ >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
74
+ >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default")
75
+ >>> model.unet = LoHaModel(model.unet, config_unet, "default")
76
+ ```
77
+
78
+ **Attributes**:
79
+ - **model** ([`~torch.nn.Module`]) -- The model to be adapted.
80
+ - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model.
81
+ """
82
+
83
+ prefix: str = "hada_"
84
+ layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = {
85
+ torch.nn.Conv2d: Conv2d,
86
+ torch.nn.Linear: Linear,
87
+ }
88
+
89
+ def _create_and_replace(
90
+ self,
91
+ config: LycorisConfig,
92
+ adapter_name: str,
93
+ target: Union[LoHaLayer, nn.Module],
94
+ target_name: str,
95
+ parent: nn.Module,
96
+ current_key: str,
97
+ ) -> None:
98
+ """
99
+ A private method to create and replace the target module with the adapter module.
100
+ """
101
+
102
+ # Regexp matching - Find key which matches current target_name in patterns provided
103
+ pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
104
+ target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name)
105
+
106
+ kwargs = config.to_dict()
107
+ kwargs["r"] = config.rank_pattern.get(target_name_key, config.r)
108
+ kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha)
109
+
110
+ if isinstance(target, LoHaLayer):
111
+ target.update_layer(adapter_name, **kwargs)
112
+ else:
113
+ new_module = self._create_new_module(config, adapter_name, target, **kwargs)
114
+ self._replace_module(parent, target_name, new_module, target)
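
A short sketch of how `_create_and_replace` resolves per-module overrides above: the first `rank_pattern`/`alpha_pattern` key that matches the module's full dotted name (as a trailing component) wins, otherwise the plain target name and the default `r`/`alpha` apply. The keys and module name below are illustrative.

```python
import re
from itertools import chain

rank_pattern = {"to_q": 4}
alpha_pattern = {}
current_key = "unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q"

pattern_keys = list(chain(rank_pattern.keys(), alpha_pattern.keys()))
# Same regex as in _create_and_replace: the key must match the end of the dotted path.
target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), "to_q")
r = rank_pattern.get(target_name_key, 8)
print(target_name_key, r)  # -> to_q 4
```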
venv/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import LoKrConfig
16
+ from .layer import Conv2d, Linear, LoKrLayer
17
+ from .model import LoKrModel
18
+
19
+
20
+ __all__ = ["LoKrConfig", "LoKrModel", "Conv2d", "Linear", "LoKrLayer"]
venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (383 Bytes).
 
venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc ADDED
Binary file (5.65 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.8 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc ADDED
Binary file (3.95 kB).
 
venv/lib/python3.10/site-packages/peft/tuners/lokr/config.py ADDED
@@ -0,0 +1,127 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import List, Optional, Union
17
+
18
+ from peft.tuners.lycoris_utils import LycorisConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class LoKrConfig(LycorisConfig):
24
+ """
25
+ Configuration class of [`LoKrModel`].
26
+
27
+ Args:
28
+ r (`int`):
29
+ LoKr rank.
30
+ alpha (`int`):
31
+ The alpha parameter for LoKr scaling.
32
+ rank_dropout (`float`):
33
+ The dropout probability for rank dimension during training.
34
+ module_dropout (`float`):
35
+ The dropout probability for disabling LoKr modules during training.
36
+ use_effective_conv2d (`bool`):
37
+ Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
38
+ decompose_both (`bool`):
39
+ Perform rank decomposition of left kronecker product matrix.
40
+ decompose_factor (`int`):
41
+ Kronecker product decomposition factor.
42
+ target_modules (`Optional[Union[List[str], str]]`):
43
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
44
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
45
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
46
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
47
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
48
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
49
+ the target modules manually.
50
+ init_weights (`bool`):
51
+ Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
52
+ discouraged.
53
+ layers_to_transform (`Union[List[int], int]`):
54
+ The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
55
+ that are specified in this list. If a single integer is passed, it will apply the transformations on the
56
+ layer at this index.
57
+ layers_pattern (`str`):
58
+ The layer pattern name, used only if `layers_to_transform` is different from `None`.
59
+ rank_pattern (`dict`):
60
+ The mapping from layer names or regexp expression to ranks which are different from the default rank
61
+ specified by `r`.
62
+ alpha_pattern (`dict`):
63
+ The mapping from layer names or regexp expression to alphas which are different from the default alpha
64
+ specified by `alpha`.
65
+ modules_to_save (`Optional[List[str]]`):
66
+ List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
67
+ """
68
+
69
+ r: int = field(default=8, metadata={"help": "LoKr rank"})
70
+ alpha: int = field(default=8, metadata={"help": "LoKr alpha"})
71
+ rank_dropout: float = field(
72
+ default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
73
+ )
74
+ module_dropout: float = field(
75
+ default=0.0, metadata={"help": "The dropout probability for disabling LoKr modules during training"}
76
+ )
77
+ use_effective_conv2d: bool = field(
78
+ default=False,
79
+ metadata={
80
+ "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
81
+ },
82
+ )
83
+ decompose_both: bool = field(
84
+ default=False,
85
+ metadata={"help": "Perform rank decomposition of left kronecker product matrix."},
86
+ )
87
+ decompose_factor: int = field(default=-1, metadata={"help": "Kronecker product decomposition factor."})
88
+ target_modules: Optional[Union[List[str], str]] = field(
89
+ default=None,
90
+ metadata={
91
+ "help": "List of module names or regex expression of the module names to replace with LoKr."
92
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
93
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
94
+ },
95
+ )
96
+ init_weights: bool = field(
97
+ default=True,
98
+ metadata={
99
+ "help": (
100
+ "Whether to initialize the weights of the LoKr layers with their default initialization. Don't change "
101
+ "this setting, except if you know exactly what you're doing."
102
+ ),
103
+ },
104
+ )
105
+ layers_to_transform: Optional[Union[List[int], int]] = field(
106
+ default=None,
107
+ metadata={
108
+ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
109
+ },
110
+ )
111
+ layers_pattern: Optional[str] = field(
112
+ default=None,
113
+ metadata={
114
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
115
+ },
116
+ )
117
+ modules_to_save: Optional[List[str]] = field(
118
+ default=None,
119
+ metadata={
120
+ "help": "List of modules apart from LoKr layers to be set as trainable and saved in the final checkpoint. "
121
+ "For example, in Sequence Classification or Token Classification tasks, "
122
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
123
+ },
124
+ )
125
+
126
+ def __post_init__(self):
127
+ self.peft_type = PeftType.LOKR
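
For intuition about `decompose_factor` and `decompose_both` above, a hedged sketch of the LoKr construction: the weight update is a Kronecker product of a small factor and a (possibly low-rank) larger factor, and `decompose_factor` controls how each dimension is split between the two factors. The concrete split below is illustrative; in the actual layer it is chosen by a factorization helper.

```python
import torch

out_dim, in_dim = 64, 48

# Example split: 64 = 8 * 8 and 48 = 6 * 8.
w1 = torch.randn(8, 6)            # small Kronecker factor (itself low-rank if decompose_both)
w2_a, w2_b = torch.randn(8, 4), torch.randn(4, 8)
w2 = w2_a @ w2_b                  # larger factor, stored as a rank-4 product

delta_w = torch.kron(w1, w2)      # shape (8 * 8, 6 * 8) == (64, 48)
assert delta_w.shape == (out_dim, in_dim)
```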
venv/lib/python3.10/site-packages/peft/tuners/lokr/layer.py ADDED
@@ -0,0 +1,409 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Any, Optional, Set, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from peft.tuners.lycoris_utils import LycorisLayer
23
+
24
+
25
+ class LoKrLayer(nn.Module, LycorisLayer):
26
+ # All names of layers that may contain adapter weights
27
+ adapter_layer_names = (
28
+ "lokr_w1",
29
+ "lokr_w1_a",
30
+ "lokr_w1_b",
31
+ "lokr_w2",
32
+ "lokr_w2_a",
33
+ "lokr_w2_b",
34
+ "lokr_t2",
35
+ )
36
+ # other_param_names is defined on parent class
37
+
38
+ def __init__(self, base_layer: nn.Module) -> None:
39
+ super().__init__()
40
+ LycorisLayer.__init__(self, base_layer)
41
+
42
+ # LoKr info
43
+ self.lokr_w1 = nn.ParameterDict({})
44
+ self.lokr_w1_a = nn.ParameterDict({})
45
+ self.lokr_w1_b = nn.ParameterDict({})
46
+ self.lokr_w2 = nn.ParameterDict({})
47
+ self.lokr_w2_a = nn.ParameterDict({})
48
+ self.lokr_w2_b = nn.ParameterDict({})
49
+ self.lokr_t2 = nn.ParameterDict({})
50
+
51
+ @property
52
+ def _available_adapters(self) -> Set[str]:
53
+ return {
54
+ *self.lokr_w1,
55
+ *self.lokr_w1_a,
56
+ *self.lokr_w1_b,
57
+ *self.lokr_w2,
58
+ *self.lokr_w2_a,
59
+ *self.lokr_w2_b,
60
+ *self.lokr_t2,
61
+ }
62
+
63
+ def create_adapter_parameters(
64
+ self,
65
+ adapter_name: str,
66
+ r: int,
67
+ shape,
68
+ use_w1: bool,
69
+ use_w2: bool,
70
+ use_effective_conv2d: bool,
71
+ ):
72
+ if use_w1:
73
+ self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0]))
74
+ else:
75
+ self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r))
76
+ self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0]))
77
+
78
+ if len(shape) == 4:
79
+ # Conv2d
80
+ if use_w2:
81
+ self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:]))
82
+ elif use_effective_conv2d:
83
+ self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
84
+ self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode
85
+ self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode
86
+ else:
87
+ self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
88
+ self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3]))
89
+ else:
90
+ # Linear
91
+ if use_w2:
92
+ self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1]))
93
+ else:
94
+ self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
95
+ self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))
96
+
97
+ def reset_adapter_parameters(self, adapter_name: str):
98
+ if adapter_name in self.lokr_w1:
99
+ nn.init.zeros_(self.lokr_w1[adapter_name])
100
+ else:
101
+ nn.init.zeros_(self.lokr_w1_a[adapter_name])
102
+ nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
103
+
104
+ if adapter_name in self.lokr_w2:
105
+ nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
106
+ else:
107
+ nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
108
+ nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
109
+
110
+ if adapter_name in self.lokr_t2:
111
+ nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
112
+
113
+ def reset_adapter_parameters_random(self, adapter_name: str):
114
+ if adapter_name in self.lokr_w1:
115
+ nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
116
+ else:
117
+ nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
118
+ nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
119
+
120
+ if adapter_name in self.lokr_w2:
121
+ nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
122
+ else:
123
+ nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
124
+ nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
125
+
126
+ if adapter_name in self.lokr_t2:
127
+ nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
128
+
129
+ def update_layer(
130
+ self,
131
+ adapter_name: str,
132
+ r: int,
133
+ alpha: float,
134
+ rank_dropout: float,
135
+ module_dropout: float,
136
+ init_weights: bool,
137
+ use_effective_conv2d: bool,
138
+ decompose_both: bool,
139
+ decompose_factor: int,
140
+ **kwargs,
141
+ ) -> None:
142
+ """Internal function to create lokr adapter
143
+
144
+ Args:
145
+ adapter_name (`str`): Name for the adapter to add.
146
+ r (`int`): Rank for the added adapter.
147
+ alpha (`float`): Alpha for the added adapter.
148
+ rank_dropout (`float`): The dropout probability for the rank dimension during training.
149
+ module_dropout (`float`): The dropout probability for disabling the adapter during training.
150
+ init_weights (`bool`): Whether to initialize adapter weights.
151
+ use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1.
152
+ decompose_both (`bool`): Whether to perform rank decomposition of the left Kronecker product matrix.
153
+ decompose_factor (`int`): Kronecker product decomposition factor.
154
+ """
155
+ if r <= 0:
156
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
157
+
158
+ self.r[adapter_name] = r
159
+ self.alpha[adapter_name] = alpha
160
+ self.scaling[adapter_name] = alpha / r
161
+ self.rank_dropout[adapter_name] = rank_dropout
162
+ self.module_dropout[adapter_name] = module_dropout
163
+ base_layer = self.get_base_layer()
164
+
165
+ # Determine shape of LoKr weights
166
+ if isinstance(base_layer, nn.Linear):
167
+ in_dim, out_dim = base_layer.in_features, base_layer.out_features
168
+
169
+ in_m, in_n = factorization(in_dim, decompose_factor)
170
+ out_l, out_k = factorization(out_dim, decompose_factor)
171
+ shape = ((out_l, out_k), (in_m, in_n)) # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d
172
+
173
+ use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
174
+ use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2)
175
+ use_effective_conv2d = False
176
+ elif isinstance(base_layer, nn.Conv2d):
177
+ in_dim, out_dim = base_layer.in_channels, base_layer.out_channels
178
+ k_size = base_layer.kernel_size
179
+
180
+ in_m, in_n = factorization(in_dim, decompose_factor)
181
+ out_l, out_k = factorization(out_dim, decompose_factor)
182
+ shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size)
183
+
184
+ use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
185
+ use_w2 = r >= max(shape[0][1], shape[1][1]) / 2
186
+ use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
187
+ else:
188
+ raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}")
189
+
190
+ # Create weights with provided shape
191
+ self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d)
192
+
193
+ # Initialize weights
194
+ if init_weights:
195
+ self.reset_adapter_parameters(adapter_name)
196
+ else:
197
+ self.reset_adapter_parameters_random(adapter_name)
198
+
199
+ # Move new weights to device
200
+ weight = getattr(self.get_base_layer(), "weight", None)
201
+ if weight is not None:
202
+ # the layer is already completely initialized; this is an update
203
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
204
+ self.to(weight.device, dtype=weight.dtype)
205
+ else:
206
+ self.to(weight.device)
207
+ self.set_adapter(self.active_adapters)
208
+
209
+ def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
210
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224
211
+ if adapter_name in self.lokr_w1:
212
+ w1 = self.lokr_w1[adapter_name]
213
+ else:
214
+ w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name]
215
+
216
+ if adapter_name in self.lokr_w2:
217
+ w2 = self.lokr_w2[adapter_name]
218
+ elif adapter_name in self.lokr_t2:
219
+ w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name])
220
+ else:
221
+ w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name]
222
+
223
+ # Make weights with Kronecker product
224
+ weight = make_kron(w1, w2)
225
+ weight = weight.reshape(self.get_base_layer().weight.shape)
226
+
227
+ # Perform rank dropout during training - drop rows of addition weights
228
+ rank_dropout = self.rank_dropout[adapter_name]
229
+ if self.training and rank_dropout:
230
+ drop = (torch.rand(weight.size(0)) > rank_dropout).float()
231
+ drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
232
+ drop /= drop.mean()
233
+ weight *= drop
234
+
235
+ return weight
236
+
237
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
238
+ previous_dtype = x.dtype
239
+
240
+ if self.disable_adapters:
241
+ if self.merged:
242
+ self.unmerge()
243
+ result = self.base_layer(x, *args, **kwargs)
244
+ elif self.merged:
245
+ result = self.base_layer(x, *args, **kwargs)
246
+ else:
247
+ result = self.base_layer(x, *args, **kwargs)
248
+
249
+ # Execute all the adapters
250
+ for active_adapter in self.active_adapters:
251
+ if active_adapter not in self._available_adapters:
252
+ continue
253
+
254
+ module_dropout = self.module_dropout[active_adapter]
255
+
256
+ # Modify current execution weights
257
+ if (not self.training) or (self.training and torch.rand(1) > module_dropout):
258
+ result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
259
+
260
+ result = result.to(previous_dtype)
261
+ return result
262
+
263
+
264
+ class Linear(LoKrLayer):
265
+ """LoKr implemented in Linear layer"""
266
+
267
+ def __init__(
268
+ self,
269
+ base_layer: nn.Module,
270
+ device: Optional[Union[str, torch.device]] = None,
271
+ dtype: Optional[torch.dtype] = None,
272
+ adapter_name: str = "default",
273
+ r: int = 0,
274
+ alpha: float = 0.0,
275
+ rank_dropout: float = 0.0,
276
+ module_dropout: float = 0.0,
277
+ init_weights: bool = True,
278
+ **kwargs,
279
+ ):
280
+ super().__init__(base_layer)
281
+
282
+ # Create adapter and set it active
283
+ self._active_adapter = adapter_name
284
+ self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
285
+
286
+ def _get_delta_activations(
287
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
288
+ ) -> torch.Tensor:
289
+ delta_weight = self.get_delta_weight(adapter_name)
290
+ # don't add bias here, because the bias is already included in the output of the base_layer
291
+ return F.linear(input, delta_weight)
292
+
293
+ def __repr__(self) -> str:
294
+ rep = super().__repr__()
295
+ return "lokr." + rep
296
+
297
+
298
+ class Conv2d(LoKrLayer):
299
+ """LoKr implemented in Conv2d layer"""
300
+
301
+ def __init__(
302
+ self,
303
+ base_layer: nn.Module,
304
+ device: Optional[Union[str, torch.device]] = None,
305
+ dtype: Optional[torch.dtype] = None,
306
+ adapter_name: str = "default",
307
+ r: int = 0,
308
+ alpha: float = 0.0,
309
+ rank_dropout: float = 0.0,
310
+ module_dropout: float = 0.0,
311
+ use_effective_conv2d: bool = False,
312
+ init_weights: bool = True,
313
+ **kwargs,
314
+ ):
315
+ super().__init__(base_layer)
316
+
317
+ # Create adapter and set it active
318
+ self._active_adapter = adapter_name
319
+ self.update_layer(
320
+ adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
321
+ )
322
+
323
+ def _get_delta_activations(
324
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
325
+ ) -> torch.Tensor:
326
+ delta_weight = self.get_delta_weight(adapter_name)
327
+ # don't add bias here, because the bias is already included in the output of the base_layer
328
+ base_layer = self.get_base_layer()
329
+ return F.conv2d(
330
+ input,
331
+ delta_weight,
332
+ stride=base_layer.stride,
333
+ padding=base_layer.padding,
334
+ dilation=base_layer.dilation,
335
+ groups=base_layer.groups,
336
+ )
337
+
338
+ def __repr__(self) -> str:
339
+ rep = super().__repr__()
340
+ return "lokr." + rep
341
+
342
+
343
+ # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11
344
+
345
+
346
+ def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]:
347
+ """Factorizes the provided number into the product of two numbers
348
+
349
+ Args:
350
+ dimension (`int`): The number that needs to be factorized.
351
+ factor (`int`, optional):
352
+ Factorization divider. The algorithm will try to output two numbers, one of which will be as close to the
353
+ factor as possible. If -1 is provided, the decomposition algorithm will search for divisors near the
354
+ square root of the dimension. Defaults to -1.
355
+
356
+ Returns:
357
+ Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is
358
+ always less than or equal to the second.
359
+
360
+ Example:
361
+ ```py
362
+ >>> factorization(256, factor=-1)
363
+ (16, 16)
364
+
365
+ >>> factorization(128, factor=-1)
366
+ (8, 16)
367
+
368
+ >>> factorization(127, factor=-1)
369
+ (1, 127)
370
+
371
+ >>> factorization(128, factor=4)
372
+ (4, 32)
373
+ ```
374
+ """
375
+
376
+ if factor > 0 and (dimension % factor) == 0:
377
+ m = factor
378
+ n = dimension // factor
379
+ return m, n
380
+ if factor == -1:
381
+ factor = dimension
382
+ m, n = 1, dimension
383
+ length = m + n
384
+ while m < n:
385
+ new_m = m + 1
386
+ while dimension % new_m != 0:
387
+ new_m += 1
388
+ new_n = dimension // new_m
389
+ if new_m + new_n > length or new_m > factor:
390
+ break
391
+ else:
392
+ m, n = new_m, new_n
393
+ if m > n:
394
+ n, m = m, n
395
+ return m, n
396
+
397
+
398
+ def make_weight_cp(t, wa, wb):
399
+ rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2]
400
+ return rebuild2
401
+
402
+
403
+ def make_kron(w1, w2, scale=1.0):
404
+ if len(w2.shape) == 4:
405
+ w1 = w1.unsqueeze(2).unsqueeze(2)
406
+ w2 = w2.contiguous()
407
+ rebuild = torch.kron(w1, w2)
408
+
409
+ return rebuild * scale
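
Taken together, `factorization`, `make_kron`, and `get_delta_weight` above reduce to a small piece of shape arithmetic. The sketch below walks through it for a Linear base layer with both Kronecker factors rank-decomposed; the sizes (64) and rank (4) are illustrative assumptions only, and the snippet presumes `factorization` and `make_kron` from the file above are in scope.

```py
import torch

# Illustrative sizes only; not taken from the diff above.
in_dim, out_dim, r = 64, 64, 4

out_l, out_k = factorization(out_dim)  # (8, 8): out_dim = out_l * out_k
in_m, in_n = factorization(in_dim)     # (8, 8): in_dim = in_m * in_n

# Left Kronecker factor from its rank-r decomposition (lokr_w1_a @ lokr_w1_b).
w1 = torch.randn(out_l, r) @ torch.randn(r, in_m)
# Right Kronecker factor from its rank-r decomposition (lokr_w2_a @ lokr_w2_b).
w2 = torch.randn(out_k, r) @ torch.randn(r, in_n)

# The Kronecker product recovers a full-size update, as in get_delta_weight.
delta = make_kron(w1, w2)
assert delta.shape == (out_l * out_k, in_m * in_n) == (out_dim, in_dim)
```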
venv/lib/python3.10/site-packages/peft/tuners/lokr/model.py ADDED
@@ -0,0 +1,115 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ from itertools import chain
17
+ from typing import Dict, Type, Union
18
+
19
+ import torch
20
+ from torch import nn
21
+
22
+ from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
23
+
24
+ from .layer import Conv2d, Linear, LoKrLayer
25
+
26
+
27
+ class LoKrModel(LycorisTuner):
28
+ """
29
+ Creates a Low-Rank Kronecker Product model from a pretrained model. The original method is partially described in
30
+ https://arxiv.org/abs/2108.06098 and in https://arxiv.org/abs/2309.14859. The current implementation heavily borrows
31
+ from
32
+ https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py
33
+
34
+ Args:
35
+ model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
36
+ config ([`LoKrConfig`]): The configuration of the LoKr model.
37
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
38
+
39
+ Returns:
40
+ `torch.nn.Module`: The LoKr model.
41
+
42
+ Example:
43
+ ```py
44
+ >>> from diffusers import StableDiffusionPipeline
45
+ >>> from peft import LoKrModel, LoKrConfig
46
+
47
+ >>> config_te = LoKrConfig(
48
+ ... r=8,
49
+ ... alpha=32,
50
+ ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
51
+ ... rank_dropout=0.0,
52
+ ... module_dropout=0.0,
53
+ ... init_weights=True,
54
+ ... )
55
+ >>> config_unet = LoKrConfig(
56
+ ... r=8,
57
+ ... alpha=32,
58
+ ... target_modules=[
59
+ ... "proj_in",
60
+ ... "proj_out",
61
+ ... "to_k",
62
+ ... "to_q",
63
+ ... "to_v",
64
+ ... "to_out.0",
65
+ ... "ff.net.0.proj",
66
+ ... "ff.net.2",
67
+ ... ],
68
+ ... rank_dropout=0.0,
69
+ ... module_dropout=0.0,
70
+ ... init_weights=True,
71
+ ... use_effective_conv2d=True,
72
+ ... )
73
+
74
+ >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
75
+ >>> model.text_encoder = LoKrModel(model.text_encoder, config_te, "default")
76
+ >>> model.unet = LoKrModel(model.unet, config_unet, "default")
77
+ ```
78
+
79
+ **Attributes**:
80
+ - **model** ([`~torch.nn.Module`]) -- The model to be adapted.
81
+ - **peft_config** ([`LoKrConfig`]): The configuration of the LoKr model.
82
+ """
83
+
84
+ prefix: str = "lokr_"
85
+ layers_mapping: Dict[Type[torch.nn.Module], Type[LoKrLayer]] = {
86
+ torch.nn.Conv2d: Conv2d,
87
+ torch.nn.Linear: Linear,
88
+ }
89
+
90
+ def _create_and_replace(
91
+ self,
92
+ config: LycorisConfig,
93
+ adapter_name: str,
94
+ target: Union[LoKrLayer, nn.Module],
95
+ target_name: str,
96
+ parent: nn.Module,
97
+ current_key: str,
98
+ ) -> None:
99
+ """
100
+ A private method to create and replace the target module with the adapter module.
101
+ """
102
+
103
+ # Regexp matching - find the key in the provided patterns that matches the current target_name
104
+ pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
105
+ target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name)
106
+
107
+ kwargs = config.to_dict()
108
+ kwargs["r"] = config.rank_pattern.get(target_name_key, config.r)
109
+ kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha)
110
+
111
+ if isinstance(target, LoKrLayer):
112
+ target.update_layer(adapter_name, **kwargs)
113
+ else:
114
+ new_module = self._create_new_module(config, adapter_name, target, **kwargs)
115
+ self._replace_module(parent, target_name, new_module, target)
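
The regexp lookup in `_create_and_replace` is the only non-obvious step: a key from `rank_pattern`/`alpha_pattern` wins over the global `r`/`alpha` whenever it matches the tail of the module path. A minimal sketch of that resolution, using an invented module path and pattern:

```py
import re
from itertools import chain

rank_pattern = {"ff.net.0.proj": 4}   # per-module override
alpha_pattern = {}
default_r = 8                         # stands in for config.r

current_key = "unet.mid_block.ff.net.0.proj"  # hypothetical full module path
target_name = "proj"                          # fallback if no pattern matches

pattern_keys = list(chain(rank_pattern.keys(), alpha_pattern.keys()))
target_name_key = next(
    filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys),
    target_name,
)
r = rank_pattern.get(target_name_key, default_r)
print(target_name_key, r)  # ff.net.0.proj 4
```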
venv/lib/python3.10/site-packages/peft/tuners/lora/__init__.py ADDED
@@ -0,0 +1,37 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
16
+
17
+ from .config import LoftQConfig, LoraConfig
18
+ from .gptq import QuantLinear
19
+ from .layer import Conv2d, Embedding, Linear, LoraLayer
20
+ from .model import LoraModel
21
+
22
+
23
+ __all__ = ["LoraConfig", "LoftQConfig", "Conv2d", "Embedding", "LoraLayer", "Linear", "LoraModel", "QuantLinear"]
24
+
25
+
26
+ def __getattr__(name):
27
+ if (name == "Linear8bitLt") and is_bnb_available():
28
+ from .bnb import Linear8bitLt
29
+
30
+ return Linear8bitLt
31
+
32
+ if (name == "Linear4bit") and is_bnb_4bit_available():
33
+ from .bnb import Linear4bit
34
+
35
+ return Linear4bit
36
+
37
+ raise AttributeError(f"module {__name__} has no attribute {name}")
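
The module-level `__getattr__` above keeps the bitsandbytes-backed classes lazy: they are imported only on first attribute access, and accessing them without bitsandbytes installed raises `AttributeError`. A hedged usage sketch:

```py
from peft.tuners import lora

try:
    # Resolved through lora.__getattr__; only succeeds if bitsandbytes is installed.
    Linear8bitLt = lora.Linear8bitLt
except AttributeError:
    Linear8bitLt = None  # fall back when the optional dependency is missing
```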
venv/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py ADDED
@@ -0,0 +1,100 @@
1
+ # Copyright 2024-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Optional
16
+
17
+ import torch
18
+
19
+ from peft.import_utils import is_aqlm_available
20
+ from peft.tuners.lora.layer import LoraLayer
21
+ from peft.tuners.tuners_utils import BaseTunerLayer
22
+
23
+
24
+ if is_aqlm_available():
25
+ from aqlm import QuantizedLinear
26
+
27
+
28
+ class AqlmLoraLinear(torch.nn.Module, LoraLayer):
29
+ def __init__(
30
+ self,
31
+ base_layer,
32
+ adapter_name: str,
33
+ r: int = 0,
34
+ lora_alpha: int = 1,
35
+ lora_dropout: float = 0.0,
36
+ init_lora_weights: bool = True,
37
+ use_rslora: bool = False,
38
+ **kwargs,
39
+ ):
40
+ super().__init__()
41
+ LoraLayer.__init__(self, base_layer)
42
+
43
+ self._active_adapter = adapter_name
44
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
45
+
46
+ def forward(self, x: torch.Tensor):
47
+ # note: logic differs from default Linear because merging is not supported
48
+ result = self.base_layer(x)
49
+
50
+ if self.disable_adapters:
51
+ return result
52
+
53
+ for active_adapter in self.active_adapters:
54
+ if active_adapter not in self.lora_A.keys():
55
+ continue
56
+ lora_A = self.lora_A[active_adapter]
57
+ lora_B = self.lora_B[active_adapter]
58
+ dropout = self.lora_dropout[active_adapter]
59
+ scaling = self.scaling[active_adapter]
60
+
61
+ requires_conversion = not torch.is_autocast_enabled()
62
+ if requires_conversion:
63
+ expected_dtype = result.dtype
64
+ x = x.to(lora_A.weight.dtype)
65
+
66
+ output = lora_B(lora_A(dropout(x)))
67
+ if requires_conversion:
68
+ output = output.to(expected_dtype)
69
+ output = output * scaling
70
+ result += output
71
+ return result
72
+
73
+ def __repr__(self) -> str:
74
+ rep = super().__repr__()
75
+ return "lora." + rep
76
+
77
+ # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
78
+ # def reset_lora_parameters(self, adapter_name):
79
+ # if adapter_name in self.lora_A.keys():
80
+ # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
81
+ # torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
82
+
83
+
84
+ def dispatch_aqlm(
85
+ target: torch.nn.Module,
86
+ adapter_name: str,
87
+ **kwargs: Any,
88
+ ) -> Optional[torch.nn.Module]:
89
+ new_module = None
90
+
91
+ if isinstance(target, BaseTunerLayer):
92
+ target_base_layer = target.get_base_layer()
93
+ else:
94
+ target_base_layer = target
95
+
96
+ if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
97
+ new_module = AqlmLoraLinear(target, adapter_name, **kwargs)
98
+ target.qweight = target_base_layer.codes
99
+
100
+ return new_module
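
`dispatch_aqlm` follows the usual PEFT dispatcher contract: it returns a wrapped module only when the (possibly unwrapped) base layer is an AQLM `QuantizedLinear`, and `None` otherwise so that later dispatchers can claim the layer. A small sketch of the declining case, assuming `dispatch_aqlm` from the file above is in scope:

```py
import torch

# A plain nn.Linear is not an AQLM QuantizedLinear, so the dispatcher declines
# and the layer falls through to the default LoRA dispatch path.
plain = torch.nn.Linear(16, 16)
assert dispatch_aqlm(plain, adapter_name="default", r=8, lora_alpha=16) is None
```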
venv/lib/python3.10/site-packages/peft/tuners/lora/awq.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright 2024-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib.metadata as importlib_metadata
15
+ from typing import Any, Optional
16
+
17
+ import packaging.version
18
+ import torch
19
+
20
+ from peft.import_utils import is_auto_awq_available
21
+ from peft.tuners.lora.layer import LoraLayer
22
+ from peft.tuners.tuners_utils import BaseTunerLayer
23
+
24
+
25
+ if is_auto_awq_available():
26
+ from awq.modules.linear import WQLinear_GEMM
27
+
28
+
29
+ class AwqLoraLinear(torch.nn.Module, LoraLayer):
30
+ def __init__(
31
+ self,
32
+ base_layer,
33
+ adapter_name,
34
+ r: int = 0,
35
+ lora_alpha: int = 1,
36
+ lora_dropout: float = 0.0,
37
+ init_lora_weights: bool = True,
38
+ use_rslora: bool = False,
39
+ **kwargs,
40
+ ):
41
+ super().__init__()
42
+ LoraLayer.__init__(self, base_layer)
43
+
44
+ # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
45
+ # for backwards compatibility
46
+ self.quant_linear_module = base_layer
47
+
48
+ self._active_adapter = adapter_name
49
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
50
+
51
+ def forward(self, x: torch.Tensor):
52
+ result = self.quant_linear_module(x)
53
+
54
+ if self.disable_adapters:
55
+ return result
56
+
57
+ for active_adapter in self.active_adapters:
58
+ if active_adapter not in self.lora_A.keys():
59
+ continue
60
+ lora_A = self.lora_A[active_adapter]
61
+ lora_B = self.lora_B[active_adapter]
62
+ dropout = self.lora_dropout[active_adapter]
63
+ scaling = self.scaling[active_adapter]
64
+
65
+ requires_conversion = not torch.is_autocast_enabled()
66
+ if requires_conversion:
67
+ expected_dtype = result.dtype
68
+ x = x.to(lora_A.weight.dtype)
69
+
70
+ output = lora_B(lora_A(dropout(x)))
71
+ if requires_conversion:
72
+ output = output.to(expected_dtype)
73
+ output = output * scaling
74
+ result = result + output
75
+ return result
76
+
77
+ def __repr__(self) -> str:
78
+ rep = super().__repr__()
79
+ return "lora." + rep
80
+
81
+
82
+ def dispatch_awq(
83
+ target: torch.nn.Module,
84
+ adapter_name: str,
85
+ **kwargs: Any,
86
+ ) -> Optional[torch.nn.Module]:
87
+ new_module = None
88
+
89
+ if isinstance(target, BaseTunerLayer):
90
+ target_base_layer = target.get_base_layer()
91
+ else:
92
+ target_base_layer = target
93
+
94
+ if is_auto_awq_available() and isinstance(target_base_layer, WQLinear_GEMM):
95
+ # Raise the error only at the dispatch level
96
+ AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0")
97
+ version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq"))
98
+
99
+ if AUTOAWQ_MINIMUM_VERSION > version_autoawq:
100
+ raise ImportError(
101
+ f"Found an incompatible version of auto-awq. Found version {version_autoawq}, "
102
+ f"but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT."
103
+ )
104
+
105
+ new_module = AwqLoraLinear(target, adapter_name, **kwargs)
106
+ target.qweight = target_base_layer.qweight
107
+
108
+ return new_module
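
Compared to the AQLM path, the AutoAWQ dispatcher adds a version gate before wrapping. The helper below restates that check on its own, assuming only `packaging` and `importlib.metadata`; the function name is invented for illustration:

```py
import importlib.metadata as importlib_metadata

import packaging.version

AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0")


def autoawq_is_new_enough() -> bool:
    # Mirrors the guard in dispatch_awq: the installed autoawq must be >= 0.2.0.
    try:
        installed = packaging.version.parse(importlib_metadata.version("autoawq"))
    except importlib_metadata.PackageNotFoundError:
        return False
    return installed >= AUTOAWQ_MINIMUM_VERSION
```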