applied-ai-018 committed
Commit 3476693 · verified · 1 Parent(s): 2f0dc34

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full list.

Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py +19 -0
  3. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py +80 -0
  9. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py +128 -0
  10. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py +161 -0
  11. env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py +121 -0
  12. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/config.py +98 -0
  16. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/model.py +394 -0
  17. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__init__.py +20 -0
  18. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/config.py +121 -0
  23. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/layer.py +375 -0
  24. env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/model.py +114 -0
  25. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py +20 -0
  26. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/config.py +127 -0
  31. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/layer.py +409 -0
  32. env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/model.py +115 -0
  33. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__init__.py +37 -0
  34. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py +100 -0
  44. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/awq.py +108 -0
  45. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/bnb.py +508 -0
  46. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/config.py +299 -0
  47. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/gptq.py +114 -0
  48. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/layer.py +1066 -0
  49. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/model.py +793 -0
  50. env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/tp_layer.py +230 -0
env-llmeval/lib/python3.10/site-packages/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc ADDED
Binary file (25.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from .config import AdaptionPromptConfig
15
+ from .layer import AdaptedAttention
16
+ from .model import AdaptionPromptModel
17
+
18
+
19
+ __all__ = ["AdaptionPromptConfig", "AdaptedAttention", "AdaptionPromptModel"]
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (374 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc ADDED
Binary file (3.28 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc ADDED
Binary file (5.56 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.55 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py ADDED
@@ -0,0 +1,80 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from collections import namedtuple
16
+ from dataclasses import dataclass, field
17
+
18
+ from peft.config import PeftConfig
19
+ from peft.utils import PeftType
20
+
21
+ from .utils import llama_compute_query_states
22
+
23
+
24
+ @dataclass
25
+ class AdaptionPromptConfig(PeftConfig):
26
+ """Stores the configuration of an [`AdaptionPromptModel`]."""
27
+
28
+ target_modules: str = field(
29
+ default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."}
30
+ )
31
+ adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"})
32
+ adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"})
33
+
34
+ def __post_init__(self):
35
+ self.peft_type = PeftType.ADAPTION_PROMPT
36
+
37
+ @property
38
+ def is_adaption_prompt(self) -> bool:
39
+ """Return True if this is an adaption prompt config."""
40
+ return True
41
+
42
+
43
+ # Contains the config that is specific to a transformers model type.
44
+ ModelTypeConfig = namedtuple(
45
+ "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"]
46
+ )
47
+
48
+ # Mapping of transformers model types to their specific configuration.
49
+ TRANSFORMERS_MODEL_CONFIG = {
50
+ "llama": ModelTypeConfig(
51
+ compute_query_states=llama_compute_query_states,
52
+ target_modules="self_attn",
53
+ k_proj_layer="k_proj",
54
+ v_proj_layer="v_proj",
55
+ o_proj_layer="o_proj",
56
+ ),
57
+ "mistral": ModelTypeConfig( # same as llama,
58
+ compute_query_states=llama_compute_query_states,
59
+ target_modules="self_attn",
60
+ k_proj_layer="k_proj",
61
+ v_proj_layer="v_proj",
62
+ o_proj_layer="o_proj",
63
+ ),
64
+ }
65
+
66
+
67
+ def prepare_config(
68
+ peft_config: AdaptionPromptConfig,
69
+ model,
70
+ ) -> AdaptionPromptConfig:
71
+ """Prepare the config based on the llama model type."""
72
+ if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
73
+ raise ValueError("Unsupported model type for adaption prompt: '{model.config.model_type}'.")
74
+
75
+ model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]
76
+
77
+ if peft_config.target_modules is None:
78
+ peft_config.target_modules = model_config.target_modules
79
+
80
+ return peft_config
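For context, a minimal usage sketch of `AdaptionPromptConfig` (not part of this commit): the checkpoint path below is a placeholder, and it assumes a Llama-style causal LM plus the standard `peft.get_peft_model` entry point.

```py
# Sketch: wrap a Llama-style causal LM with adaption prompts (LLaMA-Adapter style).
from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("path/to/llama-checkpoint")  # placeholder path
config = AdaptionPromptConfig(
    adapter_len=10,      # number of adaption-prompt tokens per wrapped attention module
    adapter_layers=30,   # how many of the top decoder layers get wrapped
    task_type="CAUSAL_LM",
)
peft_model = get_peft_model(base_model, config)
peft_model.print_trainable_parameters()  # only adaption_prompt / adaption_gate parameters stay trainable
```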
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py ADDED
@@ -0,0 +1,128 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+
21
+ from .config import TRANSFORMERS_MODEL_CONFIG
22
+
23
+
24
+ class AdaptedAttention(nn.Module):
25
+ """This module wraps a LLamaAttention module and injects adaption prompts."""
26
+
27
+ def __init__(self, model_type: str, adapter_len: int, model):
28
+ """
29
+ Initialize object.
30
+
31
+ Args:
32
+ model_type: The transformer model type. This is used to retrieve the right method to
33
+ compute query states.
34
+ adapter_len: The length of the adaption prompt to insert.
35
+ model: The original transformer attention module that is being wrapped.
36
+ """
37
+ assert not isinstance(model, AdaptedAttention)
38
+ super().__init__()
39
+ self.model_type = model_type
40
+ self.model = model
41
+ self.adapter_len = adapter_len
42
+ # Assume all parameters of the attention model we are wrapping are on the same device.
43
+ device = next(model.parameters()).device
44
+ # Don't think this was specified in the paper, but we follow the official repo which used an Embedding
45
+ # which initializes the tokens with standard normal values.
46
+ # https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L234
47
+ # (bsz, adapter_len, hidden_size)
48
+ target_dtype = (
49
+ model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32
50
+ )
51
+ self.adaption_prompt = nn.Parameter(
52
+ torch.empty(1, adapter_len, self.model.hidden_size, device=device, dtype=target_dtype).normal_()
53
+ )
54
+ # Initialize the gate to 0 as this is "zero-init".
55
+ self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype))
56
+
57
+ def forward(self, **kwargs):
58
+ """
59
+ Forward pass for the adapter which wraps the original LlamaAttention module.
60
+
61
+ "Official" paper implementation:
62
+ https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L141
63
+
64
+ Args:
65
+ kwargs: See the original LlamaAttention module.
66
+ """
67
+ if kwargs.get("output_attention", False):
68
+ raise NotImplementedError("output_attention is not currently supported.")
69
+
70
+ output, _, past_key_value = self.model(**kwargs)
71
+ bsz = output.shape[0]
72
+ q_len = output.shape[1]
73
+ embed_dim = output.shape[2]
74
+ k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer
75
+ v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer
76
+ o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer
77
+ factor = (
78
+ self.model.k_proj.in_features // self.model.k_proj.out_features
79
+ ) # Mistral has different input and output dimension for k_proj and v_proj layers
80
+
81
+ if k_proj_layer == v_proj_layer:
82
+ _, key, value = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2)
83
+ else:
84
+ key = getattr(self.model, k_proj_layer)(self.adaption_prompt)
85
+ value = getattr(self.model, v_proj_layer)(self.adaption_prompt)
86
+
87
+ # (bsz, num_key_value_heads, adapter_len, head_dim)
88
+ adapter_k = (
89
+ key.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim)
90
+ .repeat(bsz, 1, 1, 1)
91
+ .transpose(1, 2)
92
+ )
93
+ adapter_v = (
94
+ value.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim)
95
+ .repeat(bsz, 1, 1, 1)
96
+ .transpose(1, 2)
97
+ )
98
+ # Below is taken from https://github.com/huggingface/transformers/blob/e547458c43dfdbbb8f6a7757237e234c44e20a8f/src/transformers/models/mistral/modeling_mistral.py#L181
99
+ # (bsz, num_heads, adapter_len, head_dim)
100
+ adapter_k = torch.repeat_interleave(adapter_k, repeats=factor, dim=1)
101
+ adapter_v = torch.repeat_interleave(adapter_v, repeats=factor, dim=1)
102
+ # Recompute query states.
103
+ compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states
104
+ # (bsz, num_heads, q_len, head_dim)
105
+ query_states = compute_query_states(model=self.model, **kwargs)
106
+
107
+ previous_dtype = query_states.dtype
108
+
109
+ # (bsz, num_heads, q_len, adapter_len)
110
+ scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt(
111
+ self.model.head_dim
112
+ )
113
+ # Upcast attention to fp32
114
+ # (bsz, num_heads, q_len, adapter_len)
115
+ scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype)
116
+ # (bsz, q_len, num_heads * head_dim)
117
+ adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)
118
+
119
+ # (bsz, q_len, hidden_size)
120
+ if o_proj_layer is not None:
121
+ adapter_output = getattr(self.model, o_proj_layer)(adapter_output)
122
+
123
+ # Add adaption prompt output to original output.
124
+ output = output + adapter_output
125
+
126
+ # Restore original dtype.
127
+ output = output.to(previous_dtype)
128
+ return output, None, past_key_value
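The gate initialized to zero above is what makes the wrapper safe to drop into a pretrained model: until `adaption_gate` is trained away from zero, the adapter adds nothing to the attention output. A small, self-contained illustration of that property (tensor shapes are arbitrary):

```py
# Illustration of zero-init gating: a zero gate cancels the adapter contribution.
import torch
import torch.nn.functional as F

bsz, num_heads, q_len, adapter_len, head_dim = 2, 4, 5, 3, 8
scores = torch.randn(bsz, num_heads, q_len, adapter_len)        # query / adapter-key attention scores
adapter_v = torch.randn(bsz, num_heads, adapter_len, head_dim)  # adapter value states
adaption_gate = torch.zeros(1)                                  # "zero-init", as in AdaptedAttention

gated_scores = adaption_gate * F.softmax(scores, dim=-1)
adapter_output = torch.matmul(gated_scores, adapter_v)
assert torch.all(adapter_output == 0)  # the wrapped attention output is returned unchanged
```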
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py ADDED
@@ -0,0 +1,161 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict, List
16
+
17
+ import torch.nn as nn
18
+
19
+ from peft.utils import _freeze_adapter, _get_submodules
20
+
21
+ from .config import AdaptionPromptConfig, prepare_config
22
+ from .layer import AdaptedAttention
23
+ from .utils import is_adaption_prompt_trainable
24
+
25
+
26
+ class AdaptionPromptModel(nn.Module):
27
+ """
28
+ Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.
29
+
30
+ The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert
31
+ trainable prompts with gates (for zero init).
32
+
33
+ Notes on the multi-adapter pattern:
34
+ - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter
35
+ name.
36
+ - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them
37
+ in the dictionary, and replace them with the modules of the new adapter.
38
+ - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the
39
+ dictionary.
40
+ - Disabling the adapter would also result in the modules being removed from the model.
41
+ """
42
+
43
+ def __init__(self, model, configs: Dict, adapter_name: str):
44
+ super().__init__()
45
+ self.model = model
46
+ # Store adapter configs by name.
47
+ self.peft_config: Dict[str, AdaptionPromptConfig] = {}
48
+ # Store lists of the parents of the affected attention modules by adapter name.
49
+ # We keep references to the parents so we can swap the adapters in-and-out of the model.
50
+ self._parents: Dict[str, List[nn.Module]] = {}
51
+ # Store lists of cached AdaptedAttention modules by name.
52
+ self._cached_adapters: Dict[str, List] = {}
53
+ # The name of the currently active adapter.
54
+ self._active_adapter = None
55
+ # Whether the adapter is enabled.
56
+ self._enabled = True
57
+ self.forward = self.model.forward
58
+ self.add_adapter(adapter_name, configs[adapter_name])
59
+ self._mark_only_adaption_prompts_as_trainable(self.model)
60
+
61
+ def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:
62
+ """Add an adapter with the given name and config."""
63
+ config = prepare_config(config, self.model)
64
+ if adapter_name in self.peft_config:
65
+ raise ValueError(f"Adapter with name '{adapter_name}' already exists.")
66
+
67
+ parents = []
68
+ for name, _ in self.model.named_modules():
69
+ if name.endswith(config.target_modules):
70
+ par, _, _ = _get_submodules(self.model, name)
71
+ parents.append(par)
72
+ if len(parents) < config.adapter_layers:
73
+ raise ValueError(
74
+ f"Config specifies more adapter layers '{config.adapter_layers}'"
75
+ f" than the model has '{len(parents)}'."
76
+ )
77
+ # Note that if the target modules are not in Sequential, ModuleList, or
78
+ # some other PyTorch ordered container, the behavior is undefined as we
79
+ # assume here that the order of the modules is the same as the order of
80
+ # the transformer decoder layers.
81
+ parents = parents[-config.adapter_layers :]
82
+ self._parents[adapter_name] = parents
83
+
84
+ # It is only None during initialization.
85
+ # If it is disabled, we don't have to remove the modules.
86
+ if self._active_adapter is not None and self._enabled:
87
+ self._remove_adapted_attentions(self._active_adapter)
88
+ self._active_adapter = adapter_name
89
+ self.peft_config[adapter_name] = config
90
+ self._create_adapted_attentions(config, parents)
91
+ if not self._enabled:
92
+ self._remove_adapted_attentions(self._active_adapter)
93
+
94
+ if config.inference_mode:
95
+ _freeze_adapter(self.model, adapter_name)
96
+
97
+ def set_adapter(self, adapter_name: str) -> None:
98
+ """Set the model to use the adapter with the given name."""
99
+ if self._active_adapter == adapter_name:
100
+ return
101
+ if adapter_name not in self.peft_config:
102
+ raise ValueError(f"Adapter with name '{adapter_name}' does not exist.")
103
+
104
+ if self._enabled:
105
+ self._remove_adapted_attentions(self._active_adapter)
106
+ self._set_adapted_attentions(adapter_name)
107
+
108
+ self._active_adapter = adapter_name
109
+
110
+ def enable_adapter_layers(self):
111
+ """Enable adapter layers by swapping in cached AdaptedAttention modules."""
112
+ self._enabled = True
113
+ self._set_adapted_attentions(self._active_adapter)
114
+
115
+ def disable_adapter_layers(self):
116
+ """Disable adapter layers by swapping out AdaptedAttention modules."""
117
+ self._enabled = False
118
+ self._remove_adapted_attentions(self._active_adapter)
119
+
120
+ def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:
121
+ """Wrap LlamaAttention modules with newly created AdaptedAttention modules."""
122
+ for par in parents:
123
+ attn = AdaptedAttention(
124
+ model_type=self.model.config.model_type,
125
+ adapter_len=config.adapter_len,
126
+ model=getattr(par, config.target_modules),
127
+ )
128
+ setattr(par, config.target_modules, attn)
129
+
130
+ def _set_adapted_attentions(self, adapter_name: str) -> None:
131
+ """Replace LlamaAttention modules with cached AdaptedAttention modules."""
132
+ cached = self._cached_adapters[adapter_name]
133
+ del self._cached_adapters[adapter_name]
134
+ config = self.peft_config[adapter_name]
135
+ for i, par in enumerate(self._parents[adapter_name]):
136
+ setattr(par, config.target_modules, cached[i])
137
+
138
+ def _remove_adapted_attentions(self, adapter_name: str) -> None:
139
+ """Remove AdaptedAttention modules from the model and store them in the cache."""
140
+ config = self.peft_config[adapter_name]
141
+ adapted_attentions = []
142
+ for par in self._parents[adapter_name]:
143
+ attn = getattr(par, config.target_modules)
144
+ adapted_attentions.append(attn)
145
+ setattr(par, config.target_modules, attn.model)
146
+ self._cached_adapters[adapter_name] = adapted_attentions
147
+
148
+ def _mark_only_adaption_prompts_as_trainable(self, model: nn.Module) -> None:
149
+ """Freeze all parameters of the model except the adaption prompts."""
150
+ for n, p in model.named_parameters():
151
+ if not is_adaption_prompt_trainable(n):
152
+ p.requires_grad = False
153
+
154
+ def __getattr__(self, name: str):
155
+ """Forward missing attributes to the wrapped module."""
156
+ try:
157
+ return super().__getattr__(name) # defer to nn.Module's logic
158
+ except AttributeError:
159
+ # This is necessary as e.g. causal models have various methods that we
160
+ # don't want to re-implement here.
161
+ return getattr(self.model, name)
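A hedged sketch of the swap-in/swap-out pattern described in the class docstring, using the constructor signature above; the base checkpoint is a placeholder and the adapter names are illustrative:

```py
# Sketch: manage two adaption-prompt adapters on one Llama-style base model.
from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig
from peft.tuners.adaption_prompt import AdaptionPromptModel

base_model = AutoModelForCausalLM.from_pretrained("path/to/llama-checkpoint")  # placeholder path
cfg_small = AdaptionPromptConfig(adapter_len=4, adapter_layers=16, task_type="CAUSAL_LM")
cfg_large = AdaptionPromptConfig(adapter_len=10, adapter_layers=30, task_type="CAUSAL_LM")

wrapped = AdaptionPromptModel(base_model, configs={"small": cfg_small}, adapter_name="small")
wrapped.add_adapter("large", cfg_large)   # "large" becomes active; "small" is cached
wrapped.set_adapter("small")              # swap the cached AdaptedAttention modules back in
wrapped.disable_adapter_layers()          # behave like the plain base model
wrapped.enable_adapter_layers()
```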
env-llmeval/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+
19
+
20
+ def llama_rotate_half(x: torch.Tensor) -> torch.Tensor:
21
+ """
22
+ Rotate half the hidden dims of the input.
23
+
24
+ This function was duplicated verbatim from:
25
+ https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126
26
+
27
+ This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other
28
+ functions were also adapted from the transformers implementation but were modified.
29
+ """
30
+ x1 = x[..., : x.shape[-1] // 2]
31
+ x2 = x[..., x.shape[-1] // 2 :]
32
+ return torch.cat((-x2, x1), dim=-1)
33
+
34
+
35
+ def llama_apply_rotary_pos_emb(q, cos, sin, position_ids):
36
+ """
37
+ Apply rotary position embedding to query states in the Llama model.
38
+
39
+ This function was adapted from:
40
+ https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133
41
+
42
+ It was modified to remove unnecessary processing of key states. The method is compatible with transformers <=
43
+ 4.34.2 and also with the latest version (>=4.35).
44
+ """
45
+ # In previous transformers version cos/sin cached had a shape of 4D
46
+ if len(cos.shape) == 4:
47
+ gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
48
+ gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
49
+ cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
50
+ sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
51
+ # In the new version, it is 2D so we fall back to the new implementation
52
+ # https://github.com/huggingface/transformers/blame/eef7ea98c31a333bacdc7ae7a2372bde772be8e4/src/transformers/models/llama/modeling_llama.py#L222-L226
53
+ else:
54
+ cos = cos[position_ids].unsqueeze(1)
55
+ sin = sin[position_ids].unsqueeze(1)
56
+ q_embed = (q * cos) + (llama_rotate_half(q) * sin)
57
+ return q_embed
58
+
59
+
60
+ def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor:
61
+ """
62
+ Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the
63
+ original LlamaModel in the transformers library does not return them. See the related discussion in the PR:
64
+ https://github.com/huggingface/peft/pull/268
65
+ """
66
+ hidden_states = kwargs.get("hidden_states")
67
+ position_ids = kwargs.get("position_ids")
68
+ past_key_value = kwargs.get("past_key_value")
69
+ bsz, q_len, _ = hidden_states.size()
70
+ query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2)
71
+
72
+ factor = model.k_proj.in_features // model.k_proj.out_features
73
+ value_states = (
74
+ model.v_proj(hidden_states).view(bsz, q_len, (model.num_heads // factor), model.head_dim).transpose(1, 2)
75
+ )
76
+
77
+ seq_len = q_len
78
+
79
+ if past_key_value is not None:
80
+ if isinstance(past_key_value, tuple):
81
+ # for transformers <= 4.35
82
+ seq_len += past_key_value[0].shape[-2]
83
+ else:
84
+ # since transformers 4.36, this is a DynamicCache instance
85
+ seq_len += past_key_value.get_seq_length(model.layer_idx)
86
+
87
+ # For transformers > 4.37.2 `position_ids` became a required arguments in the rotary embedding's forward pass.
88
+ if "position_ids" not in inspect.signature(model.rotary_emb.forward).parameters:
89
+ # TODO we assume that position_ids is not None here, not sure if that is safe but the old code also did that
90
+ cos, sin = model.rotary_emb(value_states, seq_len=seq_len)
91
+ return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids)
92
+
93
+ past_seen_tokens = 0
94
+ if position_ids is None:
95
+ # Compute position_ids, since they are required for transformers > 4.37.2
96
+ if past_key_value is None:
97
+ new_cache_positions = torch.arange(q_len, q_len + q_len, device=value_states.device)
98
+ else:
99
+ past_seen_tokens = past_key_value.get_usable_length(q_len, model.layer_idx)
100
+ new_cache_positions = torch.arange(past_seen_tokens, past_seen_tokens + q_len, device=value_states.device)
101
+ position_ids = new_cache_positions.unsqueeze(0)
102
+
103
+ rotary_emb_kwargs = {"position_ids": position_ids}
104
+ # The `seq_len` argument has been officially removed in transformers >= 4.39.0
105
+ if "seq_len" in inspect.signature(model.rotary_emb.forward).parameters:
106
+ rotary_emb_kwargs["seq_len"] = q_len + past_seen_tokens
107
+
108
+ cos, sin = model.rotary_emb(value_states, **rotary_emb_kwargs)
109
+
110
+ # For batched inference unsqueeze it on the correct dim
111
+ # since: https://github.com/huggingface/transformers/pull/29109
112
+ if len(cos.shape) == 3:
113
+ cos = cos.unsqueeze(1)
114
+ sin = sin.unsqueeze(1)
115
+
116
+ return (query_states * cos) + (llama_rotate_half(query_states) * sin)
117
+
118
+
119
+ def is_adaption_prompt_trainable(params: str) -> bool:
120
+ """Return True if module is trainable under adaption prompt fine-tuning."""
121
+ return params.split(".")[-1].startswith("adaption_")
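Two quick sanity checks of the helpers above, runnable against the installed package (the import path is taken from this file's location):

```py
import torch
from peft.tuners.adaption_prompt.utils import is_adaption_prompt_trainable, llama_rotate_half

# llama_rotate_half concatenates (-second_half, first_half) along the last dimension.
x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
assert torch.equal(llama_rotate_half(x), torch.tensor([[-3.0, -4.0, 1.0, 2.0]]))

# Only parameters whose final name component starts with "adaption_" remain trainable.
assert is_adaption_prompt_trainable("model.layers.31.self_attn.adaption_prompt")
assert not is_adaption_prompt_trainable("model.layers.31.self_attn.q_proj.weight")
```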
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (800 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc ADDED
Binary file (2.56 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.24 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/config.py ADDED
@@ -0,0 +1,98 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import List, Optional, Union
17
+
18
+ from peft.config import PeftConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class IA3Config(PeftConfig):
24
+ """
25
+ This is the configuration class to store the configuration of a [`IA3Model`].
26
+
27
+ Args:
28
+ target_modules (`Optional[Union[List[str], str]]`):
29
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
30
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
31
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
32
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
33
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
34
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
35
+ the target modules manually.
36
+ feedforward_modules (`Optional[Union[List[str], str]]`):
37
+ The names of the modules to be treated as feedforward modules, as in the original paper. These modules will
38
+ have (IA)³ vectors multiplied to the input, instead of the output. `feedforward_modules` must be a name or
39
+ a subset of names present in `target_modules`.
40
+ fan_in_fan_out (`bool`):
41
+ Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
42
+ `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
43
+ modules_to_save (`Optional[List[str]]`):
44
+ List of modules apart from (IA)³ layers to be set as trainable and saved in the final checkpoint.
45
+ init_ia3_weights (`bool`):
46
+ Whether to initialize the vectors in the (IA)³ layers, defaults to `True`. Setting this to `False` is
47
+ discouraged.
48
+ """
49
+
50
+ target_modules: Optional[Union[List[str], str]] = field(
51
+ default=None,
52
+ metadata={
53
+ "help": (
54
+ "List of module names or regex expression of the module names to replace with (IA)³."
55
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'."
56
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
57
+ "If not specified, modules will be chosen according to the model architecture, If the architecture is "
58
+ "not known, an error will be raised -- in this case, you should specify the target modules manually."
59
+ ),
60
+ },
61
+ )
62
+ feedforward_modules: Optional[Union[List[str], str]] = field(
63
+ default=None,
64
+ metadata={
65
+ "help": "List of module names or a regex expression of module names which are feedforward"
66
+ "For example, ['output.dense']"
67
+ },
68
+ )
69
+ fan_in_fan_out: bool = field(
70
+ default=False,
71
+ metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
72
+ )
73
+ modules_to_save: Optional[List[str]] = field(
74
+ default=None,
75
+ metadata={
76
+ "help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. "
77
+ "For example, in Sequence Classification or Token Classification tasks, "
78
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
79
+ },
80
+ )
81
+ init_ia3_weights: bool = field(
82
+ default=True,
83
+ metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."},
84
+ )
85
+
86
+ def __post_init__(self):
87
+ self.peft_type = PeftType.IA3
88
+ self.target_modules = (
89
+ set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
90
+ )
91
+ self.feedforward_modules = (
92
+ set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules
93
+ )
94
+
95
+ # check if feedforward_modules is a subset of target_modules. run the check only if both are sets
96
+ if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set):
97
+ if not self.feedforward_modules.issubset(self.target_modules):
98
+ raise ValueError("`feedforward_modules` should be a subset of `target_modules`")
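A minimal usage sketch of `IA3Config` (not part of this commit), assuming the standard `peft.get_peft_model` entry point; the T5 module names are illustrative for that architecture:

```py
# Sketch: attach (IA)^3 rescaling vectors to a seq2seq model.
from transformers import AutoModelForSeq2SeqLM
from peft import IA3Config, get_peft_model

model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
config = IA3Config(
    task_type="SEQ_2_SEQ_LM",
    target_modules=["k", "v", "wo"],   # attention k/v projections plus the FFN output projection
    feedforward_modules=["wo"],        # must be a subset of target_modules (checked in __post_init__)
)
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
```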
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/model.py ADDED
@@ -0,0 +1,394 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import re
17
+ import warnings
18
+ from dataclasses import asdict
19
+ from enum import Enum
20
+ from typing import Optional
21
+
22
+ import torch
23
+ from torch import nn
24
+ from transformers.pytorch_utils import Conv1D
25
+
26
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
27
+ from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
28
+ from peft.utils import (
29
+ TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
30
+ TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
31
+ ModulesToSaveWrapper,
32
+ _get_submodules,
33
+ )
34
+
35
+ from .layer import Conv2d, IA3Layer, Linear
36
+
37
+
38
+ class IA3Model(BaseTuner):
39
+ """
40
+ Creates an Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained
41
+ transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638
42
+
43
+ Args:
44
+ model ([`~transformers.PreTrainedModel`]): The model to be adapted.
45
+ config ([`IA3Config`]): The configuration of the (IA)^3 model.
46
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
47
+
48
+ Returns:
49
+ `torch.nn.Module`: The (IA)^3 model.
50
+
51
+ Example:
52
+
53
+ ```py
54
+ >>> from transformers import AutoModelForSeq2SeqLM, ia3Config
55
+ >>> from peft import IA3Model, IA3Config
56
+
57
+ >>> config = IA3Config(
58
+ ... peft_type="IA3",
59
+ ... task_type="SEQ_2_SEQ_LM",
60
+ ... target_modules=["k", "v", "w0"],
61
+ ... feedforward_modules=["w0"],
62
+ ... )
63
+
64
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
65
+ >>> ia3_model = IA3Model(config, model)
66
+ ```
67
+
68
+ **Attributes**:
69
+ - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
70
+ - **peft_config** ([`ia3Config`]): The configuration of the (IA)^3 model.
71
+ """
72
+
73
+ prefix: str = "ia3_"
74
+
75
+ def __init__(self, model, config, adapter_name):
76
+ super().__init__(model, config, adapter_name)
77
+
78
+ @staticmethod
79
+ def _create_new_module(ia3_config, adapter_name, target, **kwargs):
80
+ # avoid eager bnb import
81
+ if is_bnb_available():
82
+ import bitsandbytes as bnb
83
+
84
+ from .bnb import Linear8bitLt
85
+
86
+ if is_bnb_4bit_available():
87
+ from .bnb import Linear4bit
88
+
89
+ loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
90
+ loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
91
+ is_feedforward = kwargs.pop("is_feedforward", False)
92
+
93
+ if isinstance(target, BaseTunerLayer):
94
+ target_base_layer = target.get_base_layer()
95
+ else:
96
+ target_base_layer = target
97
+
98
+ if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
99
+ eightbit_kwargs = kwargs.copy()
100
+ eightbit_kwargs.update(
101
+ {
102
+ "has_fp16_weights": target_base_layer.state.has_fp16_weights,
103
+ "memory_efficient_backward": target_base_layer.state.memory_efficient_backward,
104
+ "threshold": target_base_layer.state.threshold,
105
+ "index": target_base_layer.index,
106
+ }
107
+ )
108
+ new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs)
109
+ elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit):
110
+ fourbit_kwargs = kwargs.copy()
111
+ fourbit_kwargs.update(
112
+ {
113
+ "compute_dtype": target_base_layer.compute_dtype,
114
+ "compress_statistics": target_base_layer.weight.compress_statistics,
115
+ "quant_type": target_base_layer.weight.quant_type,
116
+ }
117
+ )
118
+ new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs)
119
+ elif isinstance(target, torch.nn.Conv2d):
120
+ new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
121
+ elif isinstance(target_base_layer, torch.nn.Linear):
122
+ if kwargs["fan_in_fan_out"]:
123
+ warnings.warn(
124
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
125
+ "Setting fan_in_fan_out to False."
126
+ )
127
+ kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = False
128
+ new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
129
+ elif isinstance(target_base_layer, Conv1D):
130
+ if not kwargs["fan_in_fan_out"]:
131
+ warnings.warn(
132
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. "
133
+ "Setting fan_in_fan_out to True."
134
+ )
135
+ kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True
136
+ new_module = Linear(
137
+ target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs
138
+ )
139
+ else:
140
+ raise ValueError(
141
+ f"Target module {target} is not supported. "
142
+ f"Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported."
143
+ )
144
+ return new_module
145
+
146
+ @staticmethod
147
+ def _check_target_module_exists(ia3_config, key):
148
+ return check_target_module_exists(ia3_config, key)
149
+
150
+ def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
151
+ for n, p in model.named_parameters():
152
+ if self.prefix not in n:
153
+ p.requires_grad = False
154
+
155
+ def _create_and_replace(
156
+ self,
157
+ ia3_config,
158
+ adapter_name,
159
+ target,
160
+ target_name,
161
+ parent,
162
+ current_key,
163
+ ):
164
+ # check if target module is in feedforward_modules
165
+ is_feedforward = self._check_target_module_feedforward(ia3_config, current_key)
166
+
167
+ kwargs = {
168
+ "fan_in_fan_out": ia3_config.fan_in_fan_out,
169
+ "init_ia3_weights": ia3_config.init_ia3_weights,
170
+ "is_feedforward": is_feedforward,
171
+ "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
172
+ "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
173
+ }
174
+
175
+ if isinstance(target, IA3Layer):
176
+ target.update_layer(
177
+ adapter_name,
178
+ ia3_config.init_ia3_weights,
179
+ )
180
+ else:
181
+ new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs)
182
+ if adapter_name != self.active_adapter:
183
+ # adding an additional adapter: it is not automatically trainable
184
+ new_module.requires_grad_(False)
185
+ self._replace_module(parent, target_name, new_module, target)
186
+
187
+ @staticmethod
188
+ def _check_target_module_feedforward(ia3_config, key) -> bool:
189
+ """
190
+ A helper private method that checks if the target module `key` matches with a feedforward module specified in
191
+ `ia3_config`
192
+ """
193
+ if isinstance(ia3_config.feedforward_modules, str):
194
+ is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key))
195
+ else:
196
+ is_feedforward = any(key.endswith(target_key) for target_key in ia3_config.feedforward_modules)
197
+ return is_feedforward
198
+
199
+ def _replace_module(self, parent, child_name, new_module, child):
200
+ setattr(parent, child_name, new_module)
201
+
202
+ # child layer wraps the original module, unpack it
203
+ if hasattr(child, "base_layer"):
204
+ child = child.base_layer
205
+
206
+ # layers with base_layer don't need the weight to be copied, as they have a reference already
207
+ if not hasattr(new_module, "base_layer"):
208
+ new_module.weight = child.weight
209
+ if hasattr(child, "bias"):
210
+ new_module.bias = child.bias
211
+
212
+ if getattr(child, "state", None) is not None:
213
+ if hasattr(new_module, "base_layer"):
214
+ new_module.base_layer.state = child.state
215
+ else:
216
+ new_module.state = child.state
217
+ new_module.to(child.weight.device)
218
+
219
+ # dispatch to correct device
220
+ for name, module in new_module.named_modules():
221
+ if self.prefix in name:
222
+ module.to(child.weight.device)
223
+
224
+ def __getattr__(self, name: str):
225
+ """Forward missing attributes to the wrapped module."""
226
+ try:
227
+ return super().__getattr__(name) # defer to nn.Module's logic
228
+ except AttributeError:
229
+ return getattr(self.model, name)
230
+
231
+ def get_peft_config_as_dict(self, inference: bool = False):
232
+ config_dict = {}
233
+ for key, value in self.peft_config.items():
234
+ config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
235
+ if inference:
236
+ config["inference_mode"] = True
237
+ config_dict[key] = config
238
+ return config_dict
239
+
240
+ def _set_adapter_layers(self, enabled=True):
241
+ for module in self.model.modules():
242
+ if isinstance(module, (IA3Layer, ModulesToSaveWrapper)):
243
+ module.enable_adapters(enabled)
244
+
245
+ def enable_adapter_layers(self) -> None:
246
+ """Enable all adapters.
247
+
248
+ Call this if you have previously disabled all adapters and want to re-enable them.
249
+ """
250
+ self._set_adapter_layers(enabled=True)
251
+
252
+ def disable_adapter_layers(self) -> None:
253
+ """Disable all adapters.
254
+
255
+ When disabling all adapters, the model output corresponds to the output of the base model.
256
+ """
257
+ self._set_adapter_layers(enabled=False)
258
+
259
+ def set_adapter(self, adapter_name: str | list[str]) -> None:
260
+ """Set the active adapter(s).
261
+
262
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
263
+ not desired, use the following code.
264
+
265
+ ```py
266
+ >>> for name, param in model_peft.named_parameters():
267
+ ... if ...: # some check on name (ex. if 'lora' in name)
268
+ ... param.requires_grad = False
269
+ ```
270
+
271
+ Args:
272
+ adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
273
+ """
274
+ for module in self.model.modules():
275
+ if isinstance(module, IA3Layer):
276
+ if module.merged:
277
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
278
+ module.unmerge()
279
+ module.set_adapter(adapter_name)
280
+
281
+ def _prepare_adapter_config(self, peft_config, model_config):
282
+ if peft_config.target_modules is None:
283
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING:
284
+ raise ValueError("Please specify `target_modules` in `peft_config`")
285
+ peft_config.target_modules = TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[model_config["model_type"]]
286
+ if peft_config.feedforward_modules is None:
287
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING:
288
+ raise ValueError("Please specify `feedforward_modules` in `peft_config`")
289
+ peft_config.feedforward_modules = TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[
290
+ model_config["model_type"]
291
+ ]
292
+ return peft_config
293
+
294
+ def _unload_and_optionally_merge(
295
+ self, merge: bool = True, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
296
+ ):
297
+ r"""
298
+ This method merges the (IA)^3 layers into the base model. This is needed if someone wants to use the base model
299
+ as a standalone model.
300
+
301
+ Args:
302
+ safe_merge (`bool`, `optional`, defaults to `False`):
303
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
304
+ before merging the weights. This is useful if you want to check if the merge operation will produce
305
+ NaNs. Defaults to `False`.
306
+ adapter_names (`List[str]`, *optional*):
307
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
308
+ to `None`.
309
+ """
310
+ if getattr(self.model, "is_loaded_in_8bit", False):
311
+ raise ValueError("Cannot merge ia3 layers when the model is loaded in 8-bit mode")
312
+
313
+ if getattr(self.model, "is_loaded_in_4bit", False):
314
+ raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode")
315
+
316
+ self._unloading_checks(adapter_names)
317
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
318
+ for key in key_list:
319
+ try:
320
+ parent, target, target_name = _get_submodules(self.model, key)
321
+ except AttributeError:
322
+ continue
323
+
324
+ if hasattr(target, "base_layer"):
325
+ if merge:
326
+ target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
327
+ self._replace_module(parent, target_name, target.get_base_layer(), target)
328
+ elif isinstance(target, ModulesToSaveWrapper):
329
+ # save any additional trainable modules part of `modules_to_save`
330
+ new_module = target.modules_to_save[target.active_adapter]
331
+ if hasattr(new_module, "base_layer"):
332
+ # check if the module is itself a tuner layer
333
+ if merge:
334
+ new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
335
+ new_module = new_module.get_base_layer()
336
+ setattr(parent, target_name, new_module)
337
+
338
+ return self.model
339
+
340
+ def merge_and_unload(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> torch.nn.Module:
341
+ r"""
342
+ This method merges the IA³ layers into the base model. This is needed if someone wants to use the base model as
343
+ a standalone model.
344
+
345
+ Args:
346
+ safe_merge (`bool`):
347
+ whether to activate the safe merging check to check if there is any potential Nan in the adapter
348
+ weights
349
+ adapter_names (`List[str]`, *optional*):
350
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
351
+ to `None`.
352
+
353
+ Example:
354
+
355
+ ```py
356
+ >>> from transformers import AutoModelForCausalLM
357
+ >>> from peft import PeftModel
358
+
359
+ >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
360
+ >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
361
+ >>> model = PeftModel.from_pretrained(base_model, peft_model_id)
362
+ >>> merged_model = model.merge_and_unload()
363
+ ```
364
+ """
365
+ return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names)
366
+
367
+ def unload(self) -> torch.nn.Module:
368
+ """
369
+ Gets back the base model by removing all the IA³ modules without merging. This gives back the original base
370
+ model.
371
+ """
372
+ return self._unload_and_optionally_merge(merge=False)
373
+
374
+ def delete_adapter(self, adapter_name: str) -> None:
375
+ """
376
+ Deletes an existing adapter.
377
+
378
+ Args:
379
+ adapter_name (str): Name of the adapter to be deleted.
380
+ """
381
+ if adapter_name not in self.peft_config:
382
+ raise ValueError(f"Adapter {adapter_name} does not exist")
383
+ del self.peft_config[adapter_name]
384
+
385
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
386
+ new_adapter = None
387
+ for key in key_list:
388
+ _, target, _ = _get_submodules(self.model, key)
389
+ if isinstance(target, IA3Layer):
390
+ target.delete_adapter(adapter_name)
391
+ if new_adapter is None:
392
+ new_adapter = target.active_adapters[:]
393
+
394
+ self.active_adapter = new_adapter or []
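A short, hedged sketch of the unload/merge calls defined above, continuing from the `IA3Config` example earlier (variable names are illustrative):

```py
# Sketch: either fold the (IA)^3 vectors into the base weights, or strip them out.
merged_model = peft_model.merge_and_unload()   # returns the base model with the active adapter merged in
# ...or, to recover the untouched base model without merging:
# base_model = peft_model.unload()
```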
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import LoHaConfig
16
+ from .layer import Conv2d, Linear, LoHaLayer
17
+ from .model import LoHaModel
18
+
19
+
20
+ __all__ = ["LoHaConfig", "LoHaModel", "Conv2d", "Linear", "LoHaLayer"]
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (380 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/config.cpython-310.pyc ADDED
Binary file (5.35 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/__pycache__/model.cpython-310.pyc ADDED
Binary file (3.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/config.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import List, Optional, Union
17
+
18
+ from peft.tuners.lycoris_utils import LycorisConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class LoHaConfig(LycorisConfig):
24
+ """
25
+ This is the configuration class to store the configuration of a [`LoHaModel`].
26
+
27
+ Args:
28
+ r (`int`):
29
+ LoHa rank.
30
+ alpha (`int`):
31
+ The alpha parameter for LoHa scaling.
32
+ rank_dropout (`float`):
33
+ The dropout probability for rank dimension during training.
34
+ module_dropout (`float`):
35
+ The dropout probability for disabling LoHa modules during training.
36
+ use_effective_conv2d (`bool`):
37
+ Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
38
+ target_modules (`Optional[Union[List[str], str]]`):
39
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
40
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
41
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
42
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
43
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
44
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
45
+ the target modules manually.
46
+ init_weights (`bool`):
47
+ Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
48
+ discouraged.
49
+ layers_to_transform (`Union[List[int], int]`):
50
+ The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
51
+ that are specified in this list. If a single integer is passed, it will apply the transformations on the
52
+ layer at this index.
53
+ layers_pattern (`str`):
54
+ The layer pattern name, used only if `layers_to_transform` is different from `None`.
55
+ rank_pattern (`dict`):
56
+ The mapping from layer names or regexp expression to ranks which are different from the default rank
57
+ specified by `r`.
58
+ alpha_pattern (`dict`):
59
+ The mapping from layer names or regexp expression to alphas which are different from the default alpha
60
+ specified by `alpha`.
61
+ modules_to_save (`Optional[List[str]]`):
62
+ List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
63
+ """
64
+
65
+ r: int = field(default=8, metadata={"help": "LoHa rank"})
66
+ alpha: int = field(default=8, metadata={"help": "LoHa alpha"})
67
+ rank_dropout: float = field(
68
+ default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
69
+ )
70
+ module_dropout: float = field(
71
+ default=0.0, metadata={"help": "The dropout probability for disabling LoHa modules during training"}
72
+ )
73
+ use_effective_conv2d: bool = field(
74
+ default=False,
75
+ metadata={
76
+ "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
77
+ },
78
+ )
79
+ target_modules: Optional[Union[List[str], str]] = field(
80
+ default=None,
81
+ metadata={
82
+ "help": "List of module names or regex expression of the module names to replace with LoHa."
83
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
84
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
85
+ },
86
+ )
87
+ init_weights: bool = field(
88
+ default=True,
89
+ metadata={
90
+ "help": (
91
+ "Whether to initialize the weights of the LoHa layers with their default initialization. Don't change "
92
+ "this setting, except if you know exactly what you're doing."
93
+ ),
94
+ },
95
+ )
96
+ layers_to_transform: Optional[Union[List[int], int]] = field(
97
+ default=None,
98
+ metadata={
99
+ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
100
+ },
101
+ )
102
+ layers_pattern: Optional[str] = field(
103
+ default=None,
104
+ metadata={
105
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
106
+ },
107
+ )
108
+ modules_to_save: Optional[List[str]] = field(
109
+ default=None,
110
+ metadata={
111
+ "help": "List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. "
112
+ "For example, in Sequence Classification or Token Classification tasks, "
113
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
114
+ },
115
+ )
116
+
117
+ def __post_init__(self):
118
+ self.peft_type = PeftType.LOHA
119
+ self.target_modules = (
120
+ set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
121
+ )
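The config above only declares hyperparameters; the adapter modules themselves are attached by the tuner in `model.py` below. As a minimal, hedged sketch of how such a config is typically consumed (the base model name and `target_modules` here are illustrative assumptions, not taken from this file):

```py
# Minimal sketch, assuming a recent `peft` with LoHa support and `transformers` are installed.
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # illustrative base model

config = LoHaConfig(
    r=8,
    alpha=16,  # scaling = alpha / r
    target_modules=["q_proj", "v_proj"],  # illustrative choice of modules
    rank_dropout=0.0,
    module_dropout=0.0,
    init_weights=True,  # zero-initializes one Hadamard factor so the adapter starts as a no-op
)

peft_model = get_peft_model(base_model, config)
peft_model.print_trainable_parameters()
```

Because `init_weights=True` zeroes one of the two Hadamard factors, the wrapped model's outputs are unchanged until training updates the adapter weights.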
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/layer.py ADDED
@@ -0,0 +1,375 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Any, Set, Tuple
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from peft.tuners.lycoris_utils import LycorisLayer
23
+
24
+
25
+ class LoHaLayer(nn.Module, LycorisLayer):
26
+ # All names of layers that may contain adapter weights
27
+ adapter_layer_names = ("hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b", "hada_t1", "hada_t2")
28
+ # other_param_names is defined on parent class
29
+
30
+ def __init__(self, base_layer: nn.Module):
31
+ super().__init__()
32
+ LycorisLayer.__init__(self, base_layer)
33
+
34
+ # LoHa info
35
+ self.hada_w1_a = nn.ParameterDict({})
36
+ self.hada_w1_b = nn.ParameterDict({})
37
+ self.hada_w2_a = nn.ParameterDict({})
38
+ self.hada_w2_b = nn.ParameterDict({})
39
+ self.hada_t1 = nn.ParameterDict({})
40
+ self.hada_t2 = nn.ParameterDict({})
41
+
42
+ @property
43
+ def _available_adapters(self) -> Set[str]:
44
+ return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2}
45
+
46
+ def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]):
47
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L130C9-L143C75
48
+ if len(shape) == 4:
49
+ self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
50
+ self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode
51
+ self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode
52
+
53
+ self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
54
+ self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode
55
+ self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode
56
+ else:
57
+ self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
58
+ self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
59
+
60
+ self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
61
+ self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
62
+
63
+ def reset_adapter_parameters(self, adapter_name: str):
64
+ # Original implementation performs initialization with normal distribution
65
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
66
+
67
+ # FedPara paper proposes to perform He initialization, let's stick with it
68
+ # It is enough to initialize only a single matrix with zeros to make the adapter a no-op after initialization
69
+ if adapter_name in self.hada_w1_a.keys():
70
+ nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
71
+ nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
72
+ nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
73
+ nn.init.zeros_(self.hada_w2_b[adapter_name])
74
+ if adapter_name in self.hada_t1.keys():
75
+ nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
76
+ nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
77
+
78
+ def reset_adapter_parameters_random(self, adapter_name: str):
79
+ # Original implementation performs initialization with normal distribution
80
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
81
+
82
+ # FedPara paper proposes to perform He initialization, let's stick with it
83
+ # Here all matrices are initialized randomly, so the adapter output is not zero right after initialization
84
+ if adapter_name in self.hada_w1_a.keys():
85
+ nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
86
+ nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
87
+ nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
88
+ nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5))
89
+ if adapter_name in self.hada_t1.keys():
90
+ nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
91
+ nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
92
+
93
+ def update_layer(
94
+ self,
95
+ adapter_name: str,
96
+ r: int,
97
+ alpha: float,
98
+ rank_dropout: float,
99
+ module_dropout: float,
100
+ init_weights: bool,
101
+ use_effective_conv2d: bool = False,
102
+ **kwargs,
103
+ ) -> None:
104
+ """Internal function to create loha adapter
105
+
106
+ Args:
107
+ adapter_name (`str`): Name for the adapter to add.
108
+ r (`int`): Rank for the added adapter.
109
+ alpha (`float`): Alpha for the added adapter.
110
+ rank_dropout (`float`): The dropout probability for rank dimension during training.
111
+ module_dropout (`float`): The dropout probability for disabling adapter during training.
112
+ init_weights (`bool`): Whether to initialize weights.
113
+ use_effective_conv2d (`bool`, *optional*, defaults to `False`):
114
+ Use parameter effective decomposition for Conv2d with ksize > 1.
115
+ """
116
+ if r <= 0:
117
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
118
+
119
+ self.r[adapter_name] = r
120
+ self.alpha[adapter_name] = alpha
121
+ self.scaling[adapter_name] = alpha / r
122
+ self.rank_dropout[adapter_name] = rank_dropout
123
+ self.module_dropout[adapter_name] = module_dropout
124
+
125
+ # Determine shape of LoHa weights
126
+ base_layer = self.get_base_layer()
127
+ if isinstance(base_layer, nn.Linear):
128
+ shape = tuple(base_layer.weight.shape)
129
+ elif isinstance(base_layer, nn.Conv2d):
130
+ use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
131
+ if use_effective_conv2d:
132
+ shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size)
133
+ else:
134
+ shape = (
135
+ base_layer.out_channels,
136
+ base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1],
137
+ )
138
+ else:
139
+ raise TypeError(f"LoHa is not implemented for base layers of type {type(base_layer).__name__}")
140
+
141
+ # Create weights with provided shape
142
+ self.create_adapter_parameters(adapter_name, r, shape)
143
+
144
+ # Initialize weights
145
+ if init_weights:
146
+ self.reset_adapter_parameters(adapter_name)
147
+ else:
148
+ self.reset_adapter_parameters_random(adapter_name)
149
+
150
+ # Move new weights to device
151
+ weight = getattr(self.get_base_layer(), "weight", None)
152
+ if weight is not None:
153
+ # the layer is already completely initialized, this is an update
154
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
155
+ self.to(weight.device, dtype=weight.dtype)
156
+ else:
157
+ self.to(weight.device)
158
+ self.set_adapter(self.active_adapters)
159
+
160
+ def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
161
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L178
162
+ if adapter_name in self.hada_t1.keys():
163
+ weight = make_weight_cp(
164
+ self.hada_t1[adapter_name],
165
+ self.hada_w1_a[adapter_name],
166
+ self.hada_w1_b[adapter_name],
167
+ self.hada_t2[adapter_name],
168
+ self.hada_w2_a[adapter_name],
169
+ self.hada_w2_b[adapter_name],
170
+ scale=torch.tensor(self.scaling[adapter_name]),
171
+ )
172
+ else:
173
+ weight = make_weight(
174
+ self.hada_w1_a[adapter_name],
175
+ self.hada_w1_b[adapter_name],
176
+ self.hada_w2_a[adapter_name],
177
+ self.hada_w2_b[adapter_name],
178
+ scale=torch.tensor(self.scaling[adapter_name]),
179
+ )
180
+
181
+ base_layer = self.get_base_layer()
182
+ weight = weight.reshape(base_layer.weight.shape)
183
+
184
+ # Perform rank dropout during training - drop rows of addition weights
185
+ rank_dropout = self.rank_dropout[adapter_name]
186
+ if self.training and rank_dropout:
187
+ drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype)
188
+ drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
189
+ # TODO: Investigate if there should be a scaler like in normal dropout during training
190
+ # Original implementation doesn't have it
191
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L193
192
+ drop /= drop.mean()
193
+ weight *= drop
194
+
195
+ return weight
196
+
197
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
198
+ previous_dtype = x.dtype
199
+
200
+ if self.disable_adapters:
201
+ if self.merged:
202
+ self.unmerge()
203
+ result = self.base_layer(x, *args, **kwargs)
204
+ elif self.merged:
205
+ result = self.base_layer(x, *args, **kwargs)
206
+ else:
207
+ result = self.base_layer(x, *args, **kwargs)
208
+
209
+ # Execute all the adapters
210
+ for active_adapter in self.active_adapters:
211
+ if active_adapter not in self._available_adapters:
212
+ continue
213
+
214
+ module_dropout = self.module_dropout[active_adapter]
215
+
216
+ # Modify current execution weights
217
+ if (not self.training) or (self.training and torch.rand(1) > module_dropout):
218
+ result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
219
+
220
+ result = result.to(previous_dtype)
221
+ return result
222
+
223
+
224
+ class Linear(LoHaLayer):
225
+ """LoHa implemented in Linear layer"""
226
+
227
+ def __init__(
228
+ self,
229
+ base_layer: nn.Module,
230
+ adapter_name: str = "default",
231
+ r: int = 0,
232
+ alpha: float = 0.0,
233
+ rank_dropout: float = 0.0,
234
+ module_dropout: float = 0.0,
235
+ init_weights: bool = True,
236
+ **kwargs,
237
+ ):
238
+ super().__init__(base_layer)
239
+
240
+ # Create adapter and set it active
241
+ self._active_adapter = adapter_name
242
+ self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
243
+
244
+ def _get_delta_activations(
245
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
246
+ ) -> torch.Tensor:
247
+ delta_weight = self.get_delta_weight(adapter_name)
248
+ # don't add bias here, because the bias is already included in the output of the base_layer
249
+ return F.linear(input, delta_weight)
250
+
251
+ def __repr__(self) -> str:
252
+ rep = super().__repr__()
253
+ return "loha." + rep
254
+
255
+
256
+ class Conv2d(LoHaLayer):
257
+ """LoHa implemented in Conv2d layer"""
258
+
259
+ def __init__(
260
+ self,
261
+ base_layer: nn.Module,
262
+ adapter_name: str = "default",
263
+ r: int = 0,
264
+ alpha: float = 0.0,
265
+ rank_dropout: float = 0.0,
266
+ module_dropout: float = 0.0,
267
+ use_effective_conv2d: bool = False,
268
+ init_weights: bool = True,
269
+ **kwargs,
270
+ ):
271
+ super().__init__(base_layer)
272
+
273
+ # Create adapter and set it active
274
+ self._active_adapter = adapter_name
275
+ self.update_layer(
276
+ adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
277
+ )
278
+
279
+ def _get_delta_activations(
280
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
281
+ ) -> torch.Tensor:
282
+ delta_weight = self.get_delta_weight(adapter_name)
283
+ # don't add bias here, because the bias is already included in the output of the base_layer
284
+ base_layer = self.get_base_layer()
285
+ return F.conv2d(
286
+ input,
287
+ delta_weight,
288
+ stride=base_layer.stride,
289
+ padding=base_layer.padding,
290
+ dilation=base_layer.dilation,
291
+ groups=base_layer.groups,
292
+ )
293
+
294
+ def __repr__(self) -> str:
295
+ rep = super().__repr__()
296
+ return "loha." + rep
297
+
298
+
299
+ # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L9
300
+
301
+
302
+ class HadaWeight(torch.autograd.Function):
303
+ @staticmethod
304
+ def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)):
305
+ ctx.save_for_backward(w1a, w1b, w2a, w2b, scale)
306
+ diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale
307
+ return diff_weight
308
+
309
+ @staticmethod
310
+ def backward(ctx, grad_out):
311
+ (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors
312
+ grad_out = grad_out * scale
313
+ temp = grad_out * (w2a @ w2b)
314
+ grad_w1a = temp @ w1b.T
315
+ grad_w1b = w1a.T @ temp
316
+
317
+ temp = grad_out * (w1a @ w1b)
318
+ grad_w2a = temp @ w2b.T
319
+ grad_w2b = w2a.T @ temp
320
+
321
+ del temp
322
+ return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None
323
+
324
+
325
+ class HadaWeightCP(torch.autograd.Function):
326
+ @staticmethod
327
+ def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)):
328
+ ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale)
329
+
330
+ rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a)
331
+ rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a)
332
+
333
+ return rebuild1 * rebuild2 * scale
334
+
335
+ @staticmethod
336
+ def backward(ctx, grad_out):
337
+ (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors
338
+ grad_out = grad_out * scale
339
+
340
+ temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b)
341
+ rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a)
342
+
343
+ grad_w = rebuild * grad_out
344
+ del rebuild
345
+
346
+ grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
347
+ grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T)
348
+ del grad_w, temp
349
+
350
+ grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp)
351
+ grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T)
352
+ del grad_temp
353
+
354
+ temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b)
355
+ rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a)
356
+
357
+ grad_w = rebuild * grad_out
358
+ del rebuild
359
+
360
+ grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
361
+ grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T)
362
+ del grad_w, temp
363
+
364
+ grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp)
365
+ grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T)
366
+ del grad_temp
367
+ return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None
368
+
369
+
370
+ def make_weight(w1a, w1b, w2a, w2b, scale):
371
+ return HadaWeight.apply(w1a, w1b, w2a, w2b, scale)
372
+
373
+
374
+ def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale):
375
+ return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
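The delta weight assembled above is the Hadamard (elementwise) product of two rank-`r` factorizations, scaled by `alpha / r`. A small self-contained check of that forward math, using plain torch with shapes defined locally for this sketch:

```py
# Standalone sketch of the LoHa delta weight from this file:
#   delta_W = (w1a @ w1b) * (w2a @ w2b) * scale, reshaped to the base weight's shape.
import torch
import torch.nn.functional as F

out_dim, in_dim, r = 32, 64, 4
w1a, w1b = torch.randn(out_dim, r), torch.randn(r, in_dim)
w2a, w2b = torch.randn(out_dim, r), torch.randn(r, in_dim)
scale = torch.tensor(2.0)  # alpha / r

delta_w = ((w1a @ w1b) * (w2a @ w2b)) * scale  # same expression as HadaWeight.forward
assert delta_w.shape == (out_dim, in_dim)

# Applied to activations exactly as Linear._get_delta_activations does:
x = torch.randn(3, in_dim)
assert F.linear(x, delta_w).shape == (3, out_dim)
```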
env-llmeval/lib/python3.10/site-packages/peft/tuners/loha/model.py ADDED
@@ -0,0 +1,114 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ from itertools import chain
17
+ from typing import Dict, Type, Union
18
+
19
+ import torch
20
+ from torch import nn
21
+
22
+ from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
23
+
24
+ from .layer import Conv2d, Linear, LoHaLayer
25
+
26
+
27
+ class LoHaModel(LycorisTuner):
28
+ """
29
+ Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in
30
+ https://arxiv.org/abs/2108.06098. Current implementation heavily borrows from
31
+ https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py
32
+
33
+ Args:
34
+ model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
35
+ config ([`LoHaConfig`]): The configuration of the LoHa model.
36
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
37
+
38
+ Returns:
39
+ `torch.nn.Module`: The LoHa model.
40
+
41
+ Example:
42
+ ```py
43
+ >>> from diffusers import StableDiffusionPipeline
44
+ >>> from peft import LoHaModel, LoHaConfig
45
+
46
+ >>> config_te = LoHaConfig(
47
+ ... r=8,
48
+ ... alpha=32,
49
+ ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
50
+ ... rank_dropout=0.0,
51
+ ... module_dropout=0.0,
52
+ ... init_weights=True,
53
+ ... )
54
+ >>> config_unet = LoHaConfig(
55
+ ... r=8,
56
+ ... alpha=32,
57
+ ... target_modules=[
58
+ ... "proj_in",
59
+ ... "proj_out",
60
+ ... "to_k",
61
+ ... "to_q",
62
+ ... "to_v",
63
+ ... "to_out.0",
64
+ ... "ff.net.0.proj",
65
+ ... "ff.net.2",
66
+ ... ],
67
+ ... rank_dropout=0.0,
68
+ ... module_dropout=0.0,
69
+ ... init_weights=True,
70
+ ... use_effective_conv2d=True,
71
+ ... )
72
+
73
+ >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
74
+ >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default")
75
+ >>> model.unet = LoHaModel(model.unet, config_unet, "default")
76
+ ```
77
+
78
+ **Attributes**:
79
+ - **model** ([`~torch.nn.Module`]) -- The model to be adapted.
80
+ - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model.
81
+ """
82
+
83
+ prefix: str = "hada_"
84
+ layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = {
85
+ torch.nn.Conv2d: Conv2d,
86
+ torch.nn.Linear: Linear,
87
+ }
88
+
89
+ def _create_and_replace(
90
+ self,
91
+ config: LycorisConfig,
92
+ adapter_name: str,
93
+ target: Union[LoHaLayer, nn.Module],
94
+ target_name: str,
95
+ parent: nn.Module,
96
+ current_key: str,
97
+ ) -> None:
98
+ """
99
+ A private method to create and replace the target module with the adapter module.
100
+ """
101
+
102
+ # Regexp matching - Find key which matches current target_name in patterns provided
103
+ pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
104
+ target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name)
105
+
106
+ kwargs = config.to_dict()
107
+ kwargs["r"] = config.rank_pattern.get(target_name_key, config.r)
108
+ kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha)
109
+
110
+ if isinstance(target, LoHaLayer):
111
+ target.update_layer(adapter_name, **kwargs)
112
+ else:
113
+ new_module = self._create_new_module(config, adapter_name, target, **kwargs)
114
+ self._replace_module(parent, target_name, new_module, target)
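`_create_and_replace` resolves per-module overrides by regex-matching the keys of `rank_pattern` and `alpha_pattern` against the full dotted module key. A short sketch of that matching rule, with made-up module names and patterns:

```py
# Sketch of the rank_pattern / alpha_pattern lookup used in _create_and_replace above.
import re
from itertools import chain

rank_pattern = {"q_proj": 4}                 # made-up override
alpha_pattern = {r"layers\.0\..*fc1": 16.0}  # made-up override
current_key = "model.layers.3.self_attn.q_proj"
target_name = "q_proj"  # fallback when no pattern matches
default_r = 8

pattern_keys = list(chain(rank_pattern.keys(), alpha_pattern.keys()))
# A pattern key matches when the full dotted module key ends with it.
target_name_key = next(
    filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name
)
print(target_name_key, rank_pattern.get(target_name_key, default_r))  # -> q_proj 4
```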
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import LoKrConfig
16
+ from .layer import Conv2d, Linear, LoKrLayer
17
+ from .model import LoKrModel
18
+
19
+
20
+ __all__ = ["LoKrConfig", "LoKrModel", "Conv2d", "Linear", "LoKrLayer"]
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (380 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc ADDED
Binary file (5.64 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc ADDED
Binary file (3.95 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/config.py ADDED
@@ -0,0 +1,127 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import List, Optional, Union
17
+
18
+ from peft.tuners.lycoris_utils import LycorisConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class LoKrConfig(LycorisConfig):
24
+ """
25
+ Configuration class of [`LoKrModel`].
26
+
27
+ Args:
28
+ r (`int`):
29
+ LoKr rank.
30
+ alpha (`int`):
31
+ The alpha parameter for LoKr scaling.
32
+ rank_dropout (`float`):
33
+ The dropout probability for rank dimension during training.
34
+ module_dropout (`float`):
35
+ The dropout probability for disabling LoKr modules during training.
36
+ use_effective_conv2d (`bool`):
37
+ Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
38
+ decompose_both (`bool`):
39
+ Perform rank decomposition of left kronecker product matrix.
40
+ decompose_factor (`int`):
41
+ Kronecker product decomposition factor.
42
+ target_modules (`Optional[Union[List[str], str]]`):
43
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
44
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
45
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
46
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
47
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
48
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
49
+ the target modules manually.
50
+ init_weights (`bool`):
51
+ Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
52
+ discouraged.
53
+ layers_to_transform (`Union[List[int], int]`):
54
+ The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
55
+ that are specified in this list. If a single integer is passed, it will apply the transformations on the
56
+ layer at this index.
57
+ layers_pattern (`str`):
58
+ The layer pattern name, used only if `layers_to_transform` is different from `None`.
59
+ rank_pattern (`dict`):
60
+ The mapping from layer names or regexp expression to ranks which are different from the default rank
61
+ specified by `r`.
62
+ alpha_pattern (`dict`):
63
+ The mapping from layer names or regexp expression to alphas which are different from the default alpha
64
+ specified by `alpha`.
65
+ modules_to_save (`Optional[List[str]]`):
66
+ List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
67
+ """
68
+
69
+ r: int = field(default=8, metadata={"help": "LoKr rank"})
70
+ alpha: int = field(default=8, metadata={"help": "LoKr alpha"})
71
+ rank_dropout: float = field(
72
+ default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
73
+ )
74
+ module_dropout: float = field(
75
+ default=0.0, metadata={"help": "The dropout probability for disabling LoKr modules during training"}
76
+ )
77
+ use_effective_conv2d: bool = field(
78
+ default=False,
79
+ metadata={
80
+ "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
81
+ },
82
+ )
83
+ decompose_both: bool = field(
84
+ default=False,
85
+ metadata={"help": "Perform rank decomposition of left kronecker product matrix."},
86
+ )
87
+ decompose_factor: int = field(default=-1, metadata={"help": "Kronecker product decomposition factor."})
88
+ target_modules: Optional[Union[List[str], str]] = field(
89
+ default=None,
90
+ metadata={
91
+ "help": "List of module names or regex expression of the module names to replace with LoKr."
92
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
93
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
94
+ },
95
+ )
96
+ init_weights: bool = field(
97
+ default=True,
98
+ metadata={
99
+ "help": (
100
+ "Whether to initialize the weights of the LoKr layers with their default initialization. Don't change "
101
+ "this setting, except if you know exactly what you're doing."
102
+ ),
103
+ },
104
+ )
105
+ layers_to_transform: Optional[Union[List[int], int]] = field(
106
+ default=None,
107
+ metadata={
108
+ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
109
+ },
110
+ )
111
+ layers_pattern: Optional[str] = field(
112
+ default=None,
113
+ metadata={
114
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
115
+ },
116
+ )
117
+ modules_to_save: Optional[List[str]] = field(
118
+ default=None,
119
+ metadata={
120
+ "help": "List of modules apart from LoKr layers to be set as trainable and saved in the final checkpoint. "
121
+ "For example, in Sequence Classification or Token Classification tasks, "
122
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
123
+ },
124
+ )
125
+
126
+ def __post_init__(self):
127
+ self.peft_type = PeftType.LOKR
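Relative to `LoHaConfig`, the two extra knobs are `decompose_both` and `decompose_factor`, which control how each weight dimension is split into Kronecker factors (see `factorization` in `layer.py` below). A minimal construction sketch; `target_modules` is an illustrative assumption:

```py
# Minimal sketch of constructing this config, not taken from the file itself.
from peft import LoKrConfig

config = LoKrConfig(
    r=8,
    alpha=16,
    target_modules=["q_proj", "v_proj"],  # illustrative choice of modules
    decompose_both=True,   # also rank-decompose the left Kronecker factor
    decompose_factor=-1,   # split each dimension near its square root
    use_effective_conv2d=True,
)
print(config.peft_type)  # PeftType.LOKR, set in __post_init__ above
```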
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/layer.py ADDED
@@ -0,0 +1,409 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Any, Optional, Set, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from peft.tuners.lycoris_utils import LycorisLayer
23
+
24
+
25
+ class LoKrLayer(nn.Module, LycorisLayer):
26
+ # All names of layers that may contain adapter weights
27
+ adapter_layer_names = (
28
+ "lokr_w1",
29
+ "lokr_w1_a",
30
+ "lokr_w1_b",
31
+ "lokr_w2",
32
+ "lokr_w2_a",
33
+ "lokr_w2_b",
34
+ "lokr_t2",
35
+ )
36
+ # other_param_names is defined on parent class
37
+
38
+ def __init__(self, base_layer: nn.Module) -> None:
39
+ super().__init__()
40
+ LycorisLayer.__init__(self, base_layer)
41
+
42
+ # LoKr info
43
+ self.lokr_w1 = nn.ParameterDict({})
44
+ self.lokr_w1_a = nn.ParameterDict({})
45
+ self.lokr_w1_b = nn.ParameterDict({})
46
+ self.lokr_w2 = nn.ParameterDict({})
47
+ self.lokr_w2_a = nn.ParameterDict({})
48
+ self.lokr_w2_b = nn.ParameterDict({})
49
+ self.lokr_t2 = nn.ParameterDict({})
50
+
51
+ @property
52
+ def _available_adapters(self) -> Set[str]:
53
+ return {
54
+ *self.lokr_w1,
55
+ *self.lokr_w1_a,
56
+ *self.lokr_w1_b,
57
+ *self.lokr_w2,
58
+ *self.lokr_w2_a,
59
+ *self.lokr_w2_b,
60
+ *self.lokr_t2,
61
+ }
62
+
63
+ def create_adapter_parameters(
64
+ self,
65
+ adapter_name: str,
66
+ r: int,
67
+ shape,
68
+ use_w1: bool,
69
+ use_w2: bool,
70
+ use_effective_conv2d: bool,
71
+ ):
72
+ if use_w1:
73
+ self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0]))
74
+ else:
75
+ self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r))
76
+ self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0]))
77
+
78
+ if len(shape) == 4:
79
+ # Conv2d
80
+ if use_w2:
81
+ self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:]))
82
+ elif use_effective_conv2d:
83
+ self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
84
+ self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode
85
+ self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode
86
+ else:
87
+ self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
88
+ self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3]))
89
+ else:
90
+ # Linear
91
+ if use_w2:
92
+ self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1]))
93
+ else:
94
+ self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
95
+ self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))
96
+
97
+ def reset_adapter_parameters(self, adapter_name: str):
98
+ if adapter_name in self.lokr_w1:
99
+ nn.init.zeros_(self.lokr_w1[adapter_name])
100
+ else:
101
+ nn.init.zeros_(self.lokr_w1_a[adapter_name])
102
+ nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
103
+
104
+ if adapter_name in self.lokr_w2:
105
+ nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
106
+ else:
107
+ nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
108
+ nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
109
+
110
+ if adapter_name in self.lokr_t2:
111
+ nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
112
+
113
+ def reset_adapter_parameters_random(self, adapter_name: str):
114
+ if adapter_name in self.lokr_w1:
115
+ nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
116
+ else:
117
+ nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
118
+ nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
119
+
120
+ if adapter_name in self.lokr_w2:
121
+ nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
122
+ else:
123
+ nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
124
+ nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
125
+
126
+ if adapter_name in self.lokr_t2:
127
+ nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
128
+
129
+ def update_layer(
130
+ self,
131
+ adapter_name: str,
132
+ r: int,
133
+ alpha: float,
134
+ rank_dropout: float,
135
+ module_dropout: float,
136
+ init_weights: bool,
137
+ use_effective_conv2d: bool,
138
+ decompose_both: bool,
139
+ decompose_factor: int,
140
+ **kwargs,
141
+ ) -> None:
142
+ """Internal function to create lokr adapter
143
+
144
+ Args:
145
+ adapter_name (`str`): Name for the adapter to add.
146
+ r (`int`): Rank for the added adapter.
147
+ alpha (`float`): Alpha for the added adapter.
148
+ rank_dropout (`float`): The dropout probability for rank dimension during training
149
+ module_dropout (`float`): The dropout probability for disabling adapter during training.
150
+ init_weights (`bool`): Whether to initialize adapter weights.
151
+ use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1.
152
+ decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix.
153
+ decompose_factor (`int`): Kronecker product decomposition factor.
154
+ """
155
+ if r <= 0:
156
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
157
+
158
+ self.r[adapter_name] = r
159
+ self.alpha[adapter_name] = alpha
160
+ self.scaling[adapter_name] = alpha / r
161
+ self.rank_dropout[adapter_name] = rank_dropout
162
+ self.module_dropout[adapter_name] = module_dropout
163
+ base_layer = self.get_base_layer()
164
+
165
+ # Determine shape of LoKr weights
166
+ if isinstance(base_layer, nn.Linear):
167
+ in_dim, out_dim = base_layer.in_features, base_layer.out_features
168
+
169
+ in_m, in_n = factorization(in_dim, decompose_factor)
170
+ out_l, out_k = factorization(out_dim, decompose_factor)
171
+ shape = ((out_l, out_k), (in_m, in_n)) # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d
172
+
173
+ use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
174
+ use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2)
175
+ use_effective_conv2d = False
176
+ elif isinstance(base_layer, nn.Conv2d):
177
+ in_dim, out_dim = base_layer.in_channels, base_layer.out_channels
178
+ k_size = base_layer.kernel_size
179
+
180
+ in_m, in_n = factorization(in_dim, decompose_factor)
181
+ out_l, out_k = factorization(out_dim, decompose_factor)
182
+ shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size)
183
+
184
+ use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
185
+ use_w2 = r >= max(shape[0][1], shape[1][1]) / 2
186
+ use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
187
+ else:
188
+ raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}")
189
+
190
+ # Create weights with provided shape
191
+ self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d)
192
+
193
+ # Initialize weights
194
+ if init_weights:
195
+ self.reset_adapter_parameters(adapter_name)
196
+ else:
197
+ self.reset_adapter_parameters_random(adapter_name)
198
+
199
+ # Move new weights to device
200
+ weight = getattr(self.get_base_layer(), "weight", None)
201
+ if weight is not None:
202
+ # the layer is already completely initialized, this is an update
203
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
204
+ self.to(weight.device, dtype=weight.dtype)
205
+ else:
206
+ self.to(weight.device)
207
+ self.set_adapter(self.active_adapters)
208
+
209
+ def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
210
+ # https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224
211
+ if adapter_name in self.lokr_w1:
212
+ w1 = self.lokr_w1[adapter_name]
213
+ else:
214
+ w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name]
215
+
216
+ if adapter_name in self.lokr_w2:
217
+ w2 = self.lokr_w2[adapter_name]
218
+ elif adapter_name in self.lokr_t2:
219
+ w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name])
220
+ else:
221
+ w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name]
222
+
223
+ # Make weights with Kronecker product
224
+ weight = make_kron(w1, w2)
225
+ weight = weight.reshape(self.get_base_layer().weight.shape)
226
+
227
+ # Perform rank dropout during training - drop rows of addition weights
228
+ rank_dropout = self.rank_dropout[adapter_name]
229
+ if self.training and rank_dropout:
230
+ drop = (torch.rand(weight.size(0)) > rank_dropout).float()
231
+ drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
232
+ drop /= drop.mean()
233
+ weight *= drop
234
+
235
+ return weight
236
+
237
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
238
+ previous_dtype = x.dtype
239
+
240
+ if self.disable_adapters:
241
+ if self.merged:
242
+ self.unmerge()
243
+ result = self.base_layer(x, *args, **kwargs)
244
+ elif self.merged:
245
+ result = self.base_layer(x, *args, **kwargs)
246
+ else:
247
+ result = self.base_layer(x, *args, **kwargs)
248
+
249
+ # Execute all the adapters
250
+ for active_adapter in self.active_adapters:
251
+ if active_adapter not in self._available_adapters:
252
+ continue
253
+
254
+ module_dropout = self.module_dropout[active_adapter]
255
+
256
+ # Modify current execution weights
257
+ if (not self.training) or (self.training and torch.rand(1) > module_dropout):
258
+ result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
259
+
260
+ result = result.to(previous_dtype)
261
+ return result
262
+
263
+
264
+ class Linear(LoKrLayer):
265
+ """LoKr implemented in Linear layer"""
266
+
267
+ def __init__(
268
+ self,
269
+ base_layer: nn.Module,
270
+ device: Optional[Union[str, torch.device]] = None,
271
+ dtype: Optional[torch.dtype] = None,
272
+ adapter_name: str = "default",
273
+ r: int = 0,
274
+ alpha: float = 0.0,
275
+ rank_dropout: float = 0.0,
276
+ module_dropout: float = 0.0,
277
+ init_weights: bool = True,
278
+ **kwargs,
279
+ ):
280
+ super().__init__(base_layer)
281
+
282
+ # Create adapter and set it active
283
+ self._active_adapter = adapter_name
284
+ self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
285
+
286
+ def _get_delta_activations(
287
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
288
+ ) -> torch.Tensor:
289
+ delta_weight = self.get_delta_weight(adapter_name)
290
+ # don't add bias here, because the bias is already included in the output of the base_layer
291
+ return F.linear(input, delta_weight)
292
+
293
+ def __repr__(self) -> str:
294
+ rep = super().__repr__()
295
+ return "lokr." + rep
296
+
297
+
298
+ class Conv2d(LoKrLayer):
299
+ """LoKr implemented in Conv2d layer"""
300
+
301
+ def __init__(
302
+ self,
303
+ base_layer: nn.Module,
304
+ device: Optional[Union[str, torch.device]] = None,
305
+ dtype: Optional[torch.dtype] = None,
306
+ adapter_name: str = "default",
307
+ r: int = 0,
308
+ alpha: float = 0.0,
309
+ rank_dropout: float = 0.0,
310
+ module_dropout: float = 0.0,
311
+ use_effective_conv2d: bool = False,
312
+ init_weights: bool = True,
313
+ **kwargs,
314
+ ):
315
+ super().__init__(base_layer)
316
+
317
+ # Create adapter and set it active
318
+ self._active_adapter = adapter_name
319
+ self.update_layer(
320
+ adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
321
+ )
322
+
323
+ def _get_delta_activations(
324
+ self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
325
+ ) -> torch.Tensor:
326
+ delta_weight = self.get_delta_weight(adapter_name)
327
+ # don't add bias here, because the bias is already included in the output of the base_layer
328
+ base_layer = self.get_base_layer()
329
+ return F.conv2d(
330
+ input,
331
+ delta_weight,
332
+ stride=base_layer.stride,
333
+ padding=base_layer.padding,
334
+ dilation=base_layer.dilation,
335
+ groups=base_layer.groups,
336
+ )
337
+
338
+ def __repr__(self) -> str:
339
+ rep = super().__repr__()
340
+ return "lokr." + rep
341
+
342
+
343
+ # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11
344
+
345
+
346
+ def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]:
347
+ """Factorizes the provided number into the product of two numbers
348
+
349
+ Args:
350
+ dimension (`int`): The number that needs to be factorized.
351
+ factor (`int`, optional):
352
+ Factorization divider. The algorithm will try to output two numbers, one of which will be as close to the
353
+ factor as possible. If -1 is provided, the decomposition algorithm will search for divisors near the
354
+ square root of the dimension. Defaults to -1.
355
+
356
+ Returns:
357
+ Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is
358
+ always less than or equal to the second.
359
+
360
+ Example:
361
+ ```py
362
+ >>> factorization(256, factor=-1)
363
+ (16, 16)
364
+
365
+ >>> factorization(128, factor=-1)
366
+ (8, 16)
367
+
368
+ >>> factorization(127, factor=-1)
369
+ (1, 127)
370
+
371
+ >>> factorization(128, factor=4)
372
+ (4, 32)
373
+ ```
374
+ """
375
+
376
+ if factor > 0 and (dimension % factor) == 0:
377
+ m = factor
378
+ n = dimension // factor
379
+ return m, n
380
+ if factor == -1:
381
+ factor = dimension
382
+ m, n = 1, dimension
383
+ length = m + n
384
+ while m < n:
385
+ new_m = m + 1
386
+ while dimension % new_m != 0:
387
+ new_m += 1
388
+ new_n = dimension // new_m
389
+ if new_m + new_n > length or new_m > factor:
390
+ break
391
+ else:
392
+ m, n = new_m, new_n
393
+ if m > n:
394
+ n, m = m, n
395
+ return m, n
396
+
397
+
398
+ def make_weight_cp(t, wa, wb):
399
+ rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2]
400
+ return rebuild2
401
+
402
+
403
+ def make_kron(w1, w2, scale=1.0):
404
+ if len(w2.shape) == 4:
405
+ w1 = w1.unsqueeze(2).unsqueeze(2)
406
+ w2 = w2.contiguous()
407
+ rebuild = torch.kron(w1, w2)
408
+
409
+ return rebuild * scale
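`factorization` splits each dimension into two factors (near its square root when `factor=-1`), and `make_kron` rebuilds the full delta weight as `kron(w1, w2)`, whose shape is the elementwise product of the factor shapes. A small numeric check using only torch:

```py
# Numeric sketch of the Kronecker reconstruction used in get_delta_weight above.
# For a Linear base layer with out_dim=64 and in_dim=48: factorization(64) -> (8, 8)
# and factorization(48) -> (6, 8), so w1 is (out_l, in_m) and w2 is (out_k, in_n).
import torch

w1 = torch.randn(8, 6)        # left Kronecker factor
w2 = torch.randn(8, 8)        # right Kronecker factor

delta_w = torch.kron(w1, w2)  # what make_kron(w1, w2) computes in the 2D (Linear) case
assert delta_w.shape == (8 * 8, 6 * 8)  # (out_dim, in_dim) = (64, 48)
```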
env-llmeval/lib/python3.10/site-packages/peft/tuners/lokr/model.py ADDED
@@ -0,0 +1,115 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ from itertools import chain
17
+ from typing import Dict, Type, Union
18
+
19
+ import torch
20
+ from torch import nn
21
+
22
+ from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
23
+
24
+ from .layer import Conv2d, Linear, LoKrLayer
25
+
26
+
27
+ class LoKrModel(LycorisTuner):
28
+ """
29
+ Creates Low-Rank Kronecker Product model from a pretrained model. The original method is partially described in
30
+ https://arxiv.org/abs/2108.06098 and in https://arxiv.org/abs/2309.14859. Current implementation heavily borrows
31
+ from
32
+ https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py
33
+
34
+ Args:
35
+ model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
36
+ config ([`LoKrConfig`]): The configuration of the LoKr model.
37
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
38
+
39
+ Returns:
40
+ `torch.nn.Module`: The LoKr model.
41
+
42
+ Example:
43
+ ```py
44
+ >>> from diffusers import StableDiffusionPipeline
45
+ >>> from peft import LoKrModel, LoKrConfig
46
+
47
+ >>> config_te = LoKrConfig(
48
+ ... r=8,
49
+ ... alpha=32,
50
+ ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
51
+ ... rank_dropout=0.0,
52
+ ... module_dropout=0.0,
53
+ ... init_weights=True,
54
+ ... )
55
+ >>> config_unet = LoKrConfig(
56
+ ... r=8,
57
+ ... alpha=32,
58
+ ... target_modules=[
59
+ ... "proj_in",
60
+ ... "proj_out",
61
+ ... "to_k",
62
+ ... "to_q",
63
+ ... "to_v",
64
+ ... "to_out.0",
65
+ ... "ff.net.0.proj",
66
+ ... "ff.net.2",
67
+ ... ],
68
+ ... rank_dropout=0.0,
69
+ ... module_dropout=0.0,
70
+ ... init_weights=True,
71
+ ... use_effective_conv2d=True,
72
+ ... )
73
+
74
+ >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
75
+ >>> model.text_encoder = LoKrModel(model.text_encoder, config_te, "default")
76
+ >>> model.unet = LoKrModel(model.unet, config_unet, "default")
77
+ ```
78
+
79
+ **Attributes**:
80
+ - **model** ([`~torch.nn.Module`]) -- The model to be adapted.
81
+ - **peft_config** ([`LoKrConfig`]): The configuration of the LoKr model.
82
+ """
83
+
84
+ prefix: str = "lokr_"
85
+ layers_mapping: Dict[Type[torch.nn.Module], Type[LoKrLayer]] = {
86
+ torch.nn.Conv2d: Conv2d,
87
+ torch.nn.Linear: Linear,
88
+ }
89
+
90
+ def _create_and_replace(
91
+ self,
92
+ config: LycorisConfig,
93
+ adapter_name: str,
94
+ target: Union[LoKrLayer, nn.Module],
95
+ target_name: str,
96
+ parent: nn.Module,
97
+ current_key: str,
98
+ ) -> None:
99
+ """
100
+ A private method to create and replace the target module with the adapter module.
101
+ """
102
+
103
+ # Regexp matching - Find key which matches current target_name in patterns provided
104
+ pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
105
+ target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name)
106
+
107
+ kwargs = config.to_dict()
108
+ kwargs["r"] = config.rank_pattern.get(target_name_key, config.r)
109
+ kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha)
110
+
111
+ if isinstance(target, LoKrLayer):
112
+ target.update_layer(adapter_name, **kwargs)
113
+ else:
114
+ new_module = self._create_new_module(config, adapter_name, target, **kwargs)
115
+ self._replace_module(parent, target_name, new_module, target)
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__init__.py ADDED
@@ -0,0 +1,37 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
16
+
17
+ from .config import LoftQConfig, LoraConfig
18
+ from .gptq import QuantLinear
19
+ from .layer import Conv2d, Embedding, Linear, LoraLayer
20
+ from .model import LoraModel
21
+
22
+
23
+ __all__ = ["LoraConfig", "LoftQConfig", "Conv2d", "Embedding", "LoraLayer", "Linear", "LoraModel", "QuantLinear"]
24
+
25
+
26
+ def __getattr__(name):
27
+ if (name == "Linear8bitLt") and is_bnb_available():
28
+ from .bnb import Linear8bitLt
29
+
30
+ return Linear8bitLt
31
+
32
+ if (name == "Linear4bit") and is_bnb_4bit_available():
33
+ from .bnb import Linear4bit
34
+
35
+ return Linear4bit
36
+
37
+ raise AttributeError(f"module {__name__} has no attribute {name}")
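The module-level `__getattr__` above is the PEP 562 lazy-export pattern: the bitsandbytes-backed classes are imported only when first accessed, so importing the package does not require `bitsandbytes`. A generic sketch of the same pattern; the helper below is hypothetical and stands in for `is_bnb_available`:

```py
# Generic sketch of a PEP 562 lazy export; intended to live in a package __init__.py.
import importlib.util


def _bnb_installed() -> bool:
    # Hypothetical stand-in for peft.import_utils.is_bnb_available
    return importlib.util.find_spec("bitsandbytes") is not None


def __getattr__(name):
    if name == "Linear8bitLt" and _bnb_installed():
        from .bnb import Linear8bitLt  # resolved only on first attribute access

        return Linear8bitLt
    raise AttributeError(f"module {__name__} has no attribute {name}")
```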
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (901 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/config.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc ADDED
Binary file (2.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc ADDED
Binary file (5.03 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/aqlm.py ADDED
@@ -0,0 +1,100 @@
1
+ # Copyright 2024-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Optional
16
+
17
+ import torch
18
+
19
+ from peft.import_utils import is_aqlm_available
20
+ from peft.tuners.lora.layer import LoraLayer
21
+ from peft.tuners.tuners_utils import BaseTunerLayer
22
+
23
+
24
+ if is_aqlm_available():
25
+ from aqlm import QuantizedLinear
26
+
27
+
28
+ class AqlmLoraLinear(torch.nn.Module, LoraLayer):
29
+ def __init__(
30
+ self,
31
+ base_layer,
32
+ adapter_name: str,
33
+ r: int = 0,
34
+ lora_alpha: int = 1,
35
+ lora_dropout: float = 0.0,
36
+ init_lora_weights: bool = True,
37
+ use_rslora: bool = False,
38
+ **kwargs,
39
+ ):
40
+ super().__init__()
41
+ LoraLayer.__init__(self, base_layer)
42
+
43
+ self._active_adapter = adapter_name
44
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
45
+
46
+ def forward(self, x: torch.Tensor):
47
+ # note: logic differs from default Linear because merging is not supported
48
+ result = self.base_layer(x)
49
+
50
+ if self.disable_adapters:
51
+ return result
52
+
53
+ for active_adapter in self.active_adapters:
54
+ if active_adapter not in self.lora_A.keys():
55
+ continue
56
+ lora_A = self.lora_A[active_adapter]
57
+ lora_B = self.lora_B[active_adapter]
58
+ dropout = self.lora_dropout[active_adapter]
59
+ scaling = self.scaling[active_adapter]
60
+
61
+ requires_conversion = not torch.is_autocast_enabled()
62
+ if requires_conversion:
63
+ expected_dtype = result.dtype
64
+ x = x.to(lora_A.weight.dtype)
65
+
66
+ output = lora_B(lora_A(dropout(x)))
67
+ if requires_conversion:
68
+ output = output.to(expected_dtype)
69
+ output = output * scaling
70
+ result += output
71
+ return result
72
+
73
+ def __repr__(self) -> str:
74
+ rep = super().__repr__()
75
+ return "lora." + rep
76
+
77
+ # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
78
+ # def reset_lora_parameters(self, adapter_name):
79
+ # if adapter_name in self.lora_A.keys():
80
+ # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
81
+ # torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
82
+
83
+
84
+ def dispatch_aqlm(
85
+ target: torch.nn.Module,
86
+ adapter_name: str,
87
+ **kwargs: Any,
88
+ ) -> Optional[torch.nn.Module]:
89
+ new_module = None
90
+
91
+ if isinstance(target, BaseTunerLayer):
92
+ target_base_layer = target.get_base_layer()
93
+ else:
94
+ target_base_layer = target
95
+
96
+ if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
97
+ new_module = AqlmLoraLinear(target, adapter_name, **kwargs)
98
+ target.qweight = target_base_layer.codes
99
+
100
+ return new_module
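
A minimal usage sketch for the dispatcher above, assuming the `aqlm` package is installed and an AQLM-quantized checkpoint is available; the model id and target modules are illustrative. When building the LoRA model, AQLM `QuantizedLinear` modules are routed through `dispatch_aqlm` and wrapped in `AqlmLoraLinear`.

import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained(
    "ISTA-DASLab/Mixtral-8x7b-AQLM-2Bit-1x16-hf",  # assumed AQLM checkpoint; replace with any AQLM model
    torch_dtype=torch.float16,
    device_map="auto",
)
lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base, lora_config)  # AQLM QuantizedLinear layers become AqlmLoraLinear
model.print_trainable_parameters()
# As the forward pass above indicates, merging LoRA weights into AQLM layers is not supported.
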
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/awq.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright 2024-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib.metadata as importlib_metadata
15
+ from typing import Any, Optional
16
+
17
+ import packaging.version
18
+ import torch
19
+
20
+ from peft.import_utils import is_auto_awq_available
21
+ from peft.tuners.lora.layer import LoraLayer
22
+ from peft.tuners.tuners_utils import BaseTunerLayer
23
+
24
+
25
+ if is_auto_awq_available():
26
+ from awq.modules.linear import WQLinear_GEMM
27
+
28
+
29
+ class AwqLoraLinear(torch.nn.Module, LoraLayer):
30
+ def __init__(
31
+ self,
32
+ base_layer,
33
+ adapter_name,
34
+ r: int = 0,
35
+ lora_alpha: int = 1,
36
+ lora_dropout: float = 0.0,
37
+ init_lora_weights: bool = True,
38
+ use_rslora: bool = False,
39
+ **kwargs,
40
+ ):
41
+ super().__init__()
42
+ LoraLayer.__init__(self, base_layer)
43
+
44
+ # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
45
+ # for backwards compatibility
46
+ self.quant_linear_module = base_layer
47
+
48
+ self._active_adapter = adapter_name
49
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
50
+
51
+ def forward(self, x: torch.Tensor):
52
+ result = self.quant_linear_module(x)
53
+
54
+ if self.disable_adapters:
55
+ return result
56
+
57
+ for active_adapter in self.active_adapters:
58
+ if active_adapter not in self.lora_A.keys():
59
+ continue
60
+ lora_A = self.lora_A[active_adapter]
61
+ lora_B = self.lora_B[active_adapter]
62
+ dropout = self.lora_dropout[active_adapter]
63
+ scaling = self.scaling[active_adapter]
64
+
65
+ requires_conversion = not torch.is_autocast_enabled()
66
+ if requires_conversion:
67
+ expected_dtype = result.dtype
68
+ x = x.to(lora_A.weight.dtype)
69
+
70
+ output = lora_B(lora_A(dropout(x)))
71
+ if requires_conversion:
72
+ output = output.to(expected_dtype)
73
+ output = output * scaling
74
+ result = result + output
75
+ return result
76
+
77
+ def __repr__(self) -> str:
78
+ rep = super().__repr__()
79
+ return "lora." + rep
80
+
81
+
82
+ def dispatch_awq(
83
+ target: torch.nn.Module,
84
+ adapter_name: str,
85
+ **kwargs: Any,
86
+ ) -> Optional[torch.nn.Module]:
87
+ new_module = None
88
+
89
+ if isinstance(target, BaseTunerLayer):
90
+ target_base_layer = target.get_base_layer()
91
+ else:
92
+ target_base_layer = target
93
+
94
+ if is_auto_awq_available() and isinstance(target_base_layer, WQLinear_GEMM):
95
+ # Raise the error only at the dispatch level
96
+ AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0")
97
+ version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq"))
98
+
99
+ if AUTOAWQ_MINIMUM_VERSION > version_autoawq:
100
+ raise ImportError(
101
+ f"Found an incompatible version of auto-awq. Found version {version_autoawq}, "
102
+ f"but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT."
103
+ )
104
+
105
+ new_module = AwqLoraLinear(target, adapter_name, **kwargs)
106
+ target.qweight = target_base_layer.qweight
107
+
108
+ return new_module
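
A small sketch of the dispatcher contract, assuming only that PEFT itself is importable: each `dispatch_*` function inspects the target module and returns a LoRA-wrapped module only when it recognizes the corresponding quantized layer type, otherwise `None` so the next dispatcher can be tried.

import torch
from peft.tuners.lora.awq import dispatch_awq

plain_linear = torch.nn.Linear(16, 16)
# A plain nn.Linear is not a WQLinear_GEMM layer, so dispatch_awq declines and returns None;
# the LoRA model builder then falls through to the next dispatcher (e.g. the default Linear one).
assert dispatch_awq(plain_linear, adapter_name="default", r=8, lora_alpha=16) is None

For an actual autoawq model (version 0.2.0 or newer, as enforced above), `dispatch_awq` would instead return an `AwqLoraLinear` wrapping the quantized layer.
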
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/bnb.py ADDED
@@ -0,0 +1,508 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import warnings
17
+ from typing import Any, Optional
18
+
19
+ import bitsandbytes as bnb
20
+ import torch
21
+
22
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
23
+ from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
24
+ from peft.utils.integrations import dequantize_bnb_weight
25
+ from peft.utils.other import transpose
26
+
27
+ from .layer import LoraLayer
28
+
29
+
30
+ if is_bnb_available():
31
+
32
+ class Linear8bitLt(torch.nn.Module, LoraLayer):
33
+ # Lora implemented in a dense layer
34
+ def __init__(
35
+ self,
36
+ base_layer: torch.nn.Module,
37
+ adapter_name: str,
38
+ r: int = 0,
39
+ lora_alpha: int = 1,
40
+ lora_dropout: float = 0.0,
41
+ init_lora_weights: bool = True,
42
+ use_rslora: bool = False,
43
+ use_dora: bool = False,
44
+ **kwargs,
45
+ ) -> None:
46
+ super().__init__()
47
+ LoraLayer.__init__(self, base_layer)
48
+ self.fan_in_fan_out = False
49
+
50
+ self._active_adapter = adapter_name
51
+ self.update_layer(
52
+ adapter_name,
53
+ r,
54
+ lora_alpha=lora_alpha,
55
+ lora_dropout=lora_dropout,
56
+ init_lora_weights=init_lora_weights,
57
+ use_rslora=use_rslora,
58
+ use_dora=use_dora,
59
+ )
60
+
61
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
62
+ """
63
+ Merge the active adapter weights into the base weights
64
+
65
+ Args:
66
+ safe_merge (`bool`, *optional*):
67
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
68
+ before merging the weights. This is useful if you want to check if the merge operation will produce
69
+ NaNs. Defaults to `False`.
70
+ adapter_names (`list[str]`, *optional*):
71
+ The list of adapter names that should be merged. If None, all active adapters will be merged.
72
+ Defaults to `None`.
73
+ """
74
+ adapter_names = check_adapters_to_merge(self, adapter_names)
75
+ if not adapter_names:
76
+ # no adapter to merge
77
+ return
78
+
79
+ for active_adapter in adapter_names:
80
+ if active_adapter not in self.lora_A.keys():
81
+ continue
82
+
83
+ warnings.warn(
84
+ "Merge lora module to 8-bit linear may get different generations due to rounding errors."
85
+ )
86
+ lora_data = self.get_delta_weight(active_adapter)
87
+
88
+ weight = self.get_base_layer().weight
89
+ state = self.get_base_layer().state
90
+ if state.SCB is None:
91
+ state.SCB = weight.SCB
92
+
93
+ # Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8
94
+ # dequantization directly
95
+ output = dequantize_bnb_weight(weight, state=state)
96
+ if not self.use_dora[active_adapter]:
97
+ w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data
98
+ else:
99
+ # handle dora
100
+ # since output already includes scaling, set it to 1 here
101
+ weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach()
102
+ # We need to cache weight_norm because it has to be based on the original weights. We
103
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
104
+ # different value
105
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
106
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
107
+ w_data = dora_factor.view(-1, 1) * (output + lora_data)
108
+
109
+ if safe_merge and not torch.isfinite(w_data).all():
110
+ raise ValueError(
111
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
112
+ )
113
+
114
+ self.get_base_layer().weight = bnb.nn.Int8Params(
115
+ w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
116
+ ).to(weight.device)
117
+ state.reset_grads()
118
+ self.merged_adapters.append(active_adapter)
119
+
120
+ def unmerge(self) -> None:
121
+ """
122
+ This method unmerges all merged adapter layers from the base weights.
123
+ """
124
+ if not self.merged:
125
+ warnings.warn("Already unmerged. Nothing to do.")
126
+ return
127
+
128
+ while len(self.merged_adapters) > 0:
129
+ active_adapter = self.merged_adapters.pop()
130
+ if active_adapter not in self.lora_A.keys():
131
+ continue
132
+ warnings.warn(
133
+ "Unmerge lora module to 8-bit linear may get different generations due to rounding errors."
134
+ )
135
+ lora_data = self.get_delta_weight(active_adapter)
136
+
137
+ weight = self.get_base_layer().weight
138
+ state = self.get_base_layer().state
139
+ if state.SCB is None:
140
+ state.SCB = weight.SCB
141
+ output = dequantize_bnb_weight(weight, state=state)
142
+
143
+ if not self.use_dora[active_adapter]:
144
+ w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data
145
+ else:
146
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
147
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
148
+ w_data = output.data / dora_factor.view(-1, 1) - lora_data
149
+
150
+ self.get_base_layer().weight = bnb.nn.Int8Params(
151
+ w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
152
+ ).to(weight.device)
153
+ state.reset_grads()
154
+
155
+ def get_delta_weight(self, adapter):
156
+ return (
157
+ transpose(
158
+ self.lora_B[adapter].weight @ self.lora_A[adapter].weight,
159
+ False,
160
+ )
161
+ * self.scaling[adapter]
162
+ )
163
+
164
+ def _mixed_batch_forward(
165
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
166
+ ) -> torch.Tensor:
167
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
168
+ # extra argument that allows mixing different adapters in the same batch at inference time.
169
+ result = self.base_layer(x, *args, **kwargs)
170
+
171
+ unique_adapters = set(adapter_names)
172
+ sub_batch_indices_list = []
173
+ for adapter in unique_adapters:
174
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
175
+
176
+ for i, active_adapter in enumerate(unique_adapters):
177
+ if active_adapter == "__base__":
178
+ continue
179
+ if active_adapter not in self.lora_A.keys():
180
+ continue
181
+
182
+ lora_A = self.lora_A[active_adapter]
183
+ lora_B = self.lora_B[active_adapter]
184
+ dropout = self.lora_dropout[active_adapter]
185
+ scaling = self.scaling[active_adapter]
186
+
187
+ requires_conversion = not torch.is_autocast_enabled()
188
+ if requires_conversion:
189
+ expected_dtype = result.dtype
190
+ compute_dtype = lora_A.weight.dtype
191
+ if x.dtype != compute_dtype:
192
+ x = x.to(compute_dtype)
193
+
194
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
195
+ # layer output
196
+ sub_batch = x[sub_batch_indices_list[i]]
197
+ output = lora_B(lora_A(dropout(sub_batch))) * scaling
198
+ if requires_conversion:
199
+ output = output.to(expected_dtype)
200
+ result[sub_batch_indices_list[i]] += output
201
+
202
+ return result
203
+
204
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
205
+ self._check_forward_args(x, *args, **kwargs)
206
+ adapter_names = kwargs.pop("adapter_names", None)
207
+
208
+ if self.disable_adapters:
209
+ if self.merged:
210
+ self.unmerge()
211
+ result = self.base_layer(x, *args, **kwargs)
212
+ elif adapter_names is not None:
213
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
214
+ elif self.merged:
215
+ result = self.base_layer(x, *args, **kwargs)
216
+ else:
217
+ result = self.base_layer(x, *args, **kwargs)
218
+ for active_adapter in self.active_adapters:
219
+ if active_adapter not in self.lora_A.keys():
220
+ continue
221
+ lora_A = self.lora_A[active_adapter]
222
+ lora_B = self.lora_B[active_adapter]
223
+ dropout = self.lora_dropout[active_adapter]
224
+ scaling = self.scaling[active_adapter]
225
+
226
+ requires_conversion = not torch.is_autocast_enabled()
227
+ if requires_conversion:
228
+ expected_dtype = result.dtype
229
+ compute_dtype = lora_A.weight.dtype
230
+ if x.dtype != compute_dtype:
231
+ x = x.to(compute_dtype)
232
+
233
+ if not self.use_dora[active_adapter]:
234
+ output = lora_B(lora_A(dropout(x))) * scaling
235
+ else:
236
+ output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
237
+ if requires_conversion:
238
+ output = output.to(expected_dtype)
239
+
240
+ result = result + output
241
+
242
+ return result
243
+
244
+ def __repr__(self) -> str:
245
+ rep = super().__repr__()
246
+ return "lora." + rep
247
+
248
+ def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs):
249
+ new_module = None
250
+
251
+ if isinstance(target, BaseTunerLayer):
252
+ target_base_layer = target.get_base_layer()
253
+ else:
254
+ target_base_layer = target
255
+
256
+ loaded_in_8bit = kwargs.get("loaded_in_8bit", False)
257
+ if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
258
+ eightbit_kwargs = kwargs.copy()
259
+ eightbit_kwargs.update(
260
+ {
261
+ "has_fp16_weights": target.state.has_fp16_weights,
262
+ "memory_efficient_backward": target.state.memory_efficient_backward,
263
+ "threshold": target.state.threshold,
264
+ "index": target.index,
265
+ }
266
+ )
267
+ new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs)
268
+
269
+ return new_module
270
+
271
+
272
+ if is_bnb_4bit_available():
273
+
274
+ class Linear4bit(torch.nn.Module, LoraLayer):
275
+ # Lora implemented in a dense layer
276
+ def __init__(
277
+ self,
278
+ base_layer: torch.nn.Module,
279
+ adapter_name: str,
280
+ r: int = 0,
281
+ lora_alpha: int = 1,
282
+ lora_dropout: float = 0.0,
283
+ init_lora_weights: bool = True,
284
+ use_rslora: bool = False,
285
+ use_dora: bool = False,
286
+ **kwargs,
287
+ ) -> None:
288
+ super().__init__()
289
+ LoraLayer.__init__(self, base_layer)
290
+ self.fan_in_fan_out = False
291
+
292
+ self._active_adapter = adapter_name
293
+ self.update_layer(
294
+ adapter_name,
295
+ r,
296
+ lora_alpha=lora_alpha,
297
+ lora_dropout=lora_dropout,
298
+ init_lora_weights=init_lora_weights,
299
+ use_rslora=use_rslora,
300
+ use_dora=use_dora,
301
+ )
302
+
303
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
304
+ """
305
+ Merge the active adapter weights into the base weights
306
+
307
+ Args:
308
+ safe_merge (`bool`, *optional*):
309
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
310
+ before merging the weights. This is useful if you want to check if the merge operation will produce
311
+ NaNs. Defaults to `False`.
312
+ adapter_names (`list[str]`, *optional*):
313
+ The list of adapter names that should be merged. If None, all active adapters will be merged.
314
+ Defaults to `None`.
315
+ """
316
+ adapter_names = check_adapters_to_merge(self, adapter_names)
317
+ if not adapter_names:
318
+ # no adapter to merge
319
+ return
320
+
321
+ for active_adapter in adapter_names:
322
+ if active_adapter not in self.lora_A.keys():
323
+ continue
324
+
325
+ warnings.warn(
326
+ "Merge lora module to 4-bit linear may get different generations due to rounding errors."
327
+ )
328
+ # Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930
329
+ weight = self.get_base_layer().weight
330
+ kwargs = weight.__dict__
331
+ lora_data = self.get_delta_weight(active_adapter)
332
+
333
+ output = dequantize_bnb_weight(weight, state=weight.quant_state)
334
+ if not self.use_dora[active_adapter]:
335
+ w_data = output + lora_data
336
+ else:
337
+ # handle dora
338
+ # since output already includes scaling, set it to 1 here
339
+ weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach()
340
+ # We need to cache weight_norm because it has to be based on the original weights. We
341
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
342
+ # different value
343
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
344
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
345
+ w_data = dora_factor.view(-1, 1) * (output + lora_data)
346
+
347
+ if safe_merge and not torch.isfinite(w_data).all():
348
+ raise ValueError(
349
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
350
+ )
351
+ if "bnb_quantized" in kwargs:
352
+ kwargs["bnb_quantized"] = False
353
+ self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
354
+ weight.device
355
+ )
356
+ self.merged_adapters.append(active_adapter)
357
+
358
+ def unmerge(self) -> None:
359
+ """
360
+ This method unmerges all merged adapter layers from the base weights.
361
+ """
362
+ if not self.merged:
363
+ warnings.warn("Already unmerged. Nothing to do.")
364
+ return
365
+
366
+ while len(self.merged_adapters) > 0:
367
+ active_adapter = self.merged_adapters.pop()
368
+ if active_adapter not in self.lora_A.keys():
369
+ continue
370
+ warnings.warn(
371
+ "Unmerge lora module to 4-bit linear may get different generations due to rounding errors."
372
+ )
373
+
374
+ lora_data = self.get_delta_weight(active_adapter)
375
+ weight = self.get_base_layer().weight
376
+ kwargs = weight.__dict__
377
+ output = dequantize_bnb_weight(weight, state=weight.quant_state)
378
+
379
+ if not self.use_dora[active_adapter]:
380
+ w_data = output - lora_data
381
+ else:
382
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
383
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
384
+ w_data = output.data / dora_factor.view(-1, 1) - lora_data
385
+
386
+ if "bnb_quantized" in kwargs:
387
+ kwargs["bnb_quantized"] = False
388
+ self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
389
+ weight.device
390
+ )
391
+
392
+ def get_delta_weight(self, adapter):
393
+ return (
394
+ transpose(
395
+ self.lora_B[adapter].weight @ self.lora_A[adapter].weight,
396
+ False,
397
+ )
398
+ * self.scaling[adapter]
399
+ )
400
+
401
+ def _mixed_batch_forward(
402
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
403
+ ) -> torch.Tensor:
404
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
405
+ # extra argument that allows mixing different adapters in the same batch at inference time.
406
+ result = self.base_layer(x, *args, **kwargs)
407
+
408
+ unique_adapters = set(adapter_names)
409
+ sub_batch_indices_list = []
410
+ for adapter in unique_adapters:
411
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
412
+
413
+ for i, active_adapter in enumerate(unique_adapters):
414
+ if active_adapter == "__base__":
415
+ continue
416
+ if active_adapter not in self.lora_A.keys():
417
+ continue
418
+
419
+ lora_A = self.lora_A[active_adapter]
420
+ lora_B = self.lora_B[active_adapter]
421
+ dropout = self.lora_dropout[active_adapter]
422
+ scaling = self.scaling[active_adapter]
423
+
424
+ requires_conversion = not torch.is_autocast_enabled()
425
+ if requires_conversion:
426
+ expected_dtype = result.dtype
427
+ x = x.to(lora_A.weight.dtype)
428
+
429
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
430
+ # layer output
431
+ sub_batch = x[sub_batch_indices_list[i]]
432
+ output = lora_B(lora_A(dropout(sub_batch))) * scaling
433
+ if requires_conversion:
434
+ output = output.to(expected_dtype)
435
+ result[sub_batch_indices_list[i]] += output
436
+
437
+ return result
438
+
439
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
440
+ self._check_forward_args(x, *args, **kwargs)
441
+ adapter_names = kwargs.pop("adapter_names", None)
442
+
443
+ if self.disable_adapters:
444
+ if self.merged:
445
+ self.unmerge()
446
+ result = self.base_layer(x, *args, **kwargs)
447
+ elif adapter_names is not None:
448
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
449
+ elif self.merged:
450
+ result = self.base_layer(x, *args, **kwargs)
451
+ else:
452
+ result = self.base_layer(x, *args, **kwargs)
453
+ # As per Tim Dettmers, for 4bit, we need to defensively clone here.
454
+ # The reason is that in some cases, an error can occur that backprop
455
+ # does not work on a manipulated view. This issue may be solved with
456
+ # newer PyTorch versions but this would need extensive testing to be
457
+ # sure.
458
+ result = result.clone()
459
+
460
+ for active_adapter in self.active_adapters:
461
+ if active_adapter not in self.lora_A.keys():
462
+ continue
463
+ lora_A = self.lora_A[active_adapter]
464
+ lora_B = self.lora_B[active_adapter]
465
+ dropout = self.lora_dropout[active_adapter]
466
+ scaling = self.scaling[active_adapter]
467
+
468
+ requires_conversion = not torch.is_autocast_enabled()
469
+ if requires_conversion:
470
+ expected_dtype = result.dtype
471
+ x = x.to(lora_A.weight.dtype)
472
+
473
+ if not self.use_dora[active_adapter]:
474
+ output = lora_B(lora_A(dropout(x))) * scaling
475
+ else:
476
+ output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
477
+ if requires_conversion:
478
+ output = output.to(expected_dtype)
479
+
480
+ result = result + output
481
+
482
+ return result
483
+
484
+ def __repr__(self) -> str:
485
+ rep = super().__repr__()
486
+ return "lora." + rep
487
+
488
+ def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs):
489
+ new_module = None
490
+
491
+ if isinstance(target, BaseTunerLayer):
492
+ target_base_layer = target.get_base_layer()
493
+ else:
494
+ target_base_layer = target
495
+
496
+ loaded_in_4bit = kwargs.get("loaded_in_4bit", False)
497
+ if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
498
+ fourbit_kwargs = kwargs.copy()
499
+ fourbit_kwargs.update(
500
+ {
501
+ "compute_dtype": target_base_layer.compute_dtype,
502
+ "compress_statistics": target_base_layer.weight.compress_statistics,
503
+ "quant_type": target_base_layer.weight.quant_type,
504
+ }
505
+ )
506
+ new_module = Linear4bit(target, adapter_name, **fourbit_kwargs)
507
+
508
+ return new_module
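
A hedged sketch of the typical bitsandbytes path served by `Linear4bit`/`dispatch_bnb_4bit` above; the model id and target modules are illustrative and assume `bitsandbytes` and `accelerate` are installed.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # illustrative model id
    quantization_config=bnb_config,
    device_map="auto",
)
base = prepare_model_for_kbit_training(base)  # upcasts norms and enables input grads for k-bit training
model = get_peft_model(base, LoraConfig(r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"]))
# Mixed-adapter inference: forward/generate accept adapter_names, which is handled by
# _mixed_batch_forward above; "__base__" selects the unadapted base layer for that sample.
# outputs = model(input_ids, adapter_names=["default", "__base__"])
# Merging dequantizes, adds the LoRA delta and re-quantizes, so small rounding differences
# are expected, as the warnings in merge()/unmerge() indicate.
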
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/config.py ADDED
@@ -0,0 +1,299 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Literal, Optional, Union
19
+
20
+ from peft.config import PeftConfig
21
+ from peft.utils import PeftType
22
+
23
+
24
+ @dataclass
25
+ class LoftQConfig:
26
+ """
27
+ This is the sub-configuration class to store the configuration of a [`LoraModel`].
28
+
29
+ Args:
30
+ bits_pattern (`dict`): The mapping from layer names or regexp expression to bits which are different from the
31
+ default bits specified by `bits`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 2`}.
32
+ bits (`int`): Quantization bits for LoftQ.
33
+ iter (`int`): Alternating iterations for LoftQ.
34
+ fake (`bool`): If True, use fp16/fp32 weights; this is intended for a first pass in order to save the
34
+ weights. If False, use bitsandbytes 4-bit linear layers, in which case the weights cannot be saved. It is
35
+ recommended to set this to True, save the weights, and then load the saved weights in 4 bits.
37
+ """
38
+
39
+ loftq_bits: int = field(default=4, metadata={"help": "Quantization bits for LoftQ"})
40
+ loftq_iter: int = field(default=1, metadata={"help": "Alternating iterations for LoftQ"})
41
+
42
+
43
+ @dataclass
44
+ class LoraConfig(PeftConfig):
45
+ """
46
+ This is the configuration class to store the configuration of a [`LoraModel`].
47
+
48
+ Args:
49
+ r (`int`):
50
+ Lora attention dimension (the "rank").
51
+ target_modules (`Optional[Union[List[str], str]]`):
52
+ The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
53
+ names will be replaced. When passing a string, a regex match will be performed. When passing a list of
54
+ strings, either an exact match will be performed or it is checked if the name of the module ends with any
55
+ of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
56
+ excluding the output layer. If this is not specified, modules will be chosen according to the model
57
+ architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
58
+ the target modules manually.
59
+ lora_alpha (`int`):
60
+ The alpha parameter for Lora scaling.
61
+ lora_dropout (`float`):
62
+ The dropout probability for Lora layers.
63
+ fan_in_fan_out (`bool`):
64
+ Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
65
+ `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
66
+ bias (`str`):
67
+ Bias type for LoRA. Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases
68
+ will be updated during training. Be aware that this means that, even when disabling the adapters, the model
69
+ will not produce the same output as the base model would have without adaptation.
70
+ use_rslora (`bool`):
71
+ When set to True, uses <a href='https://doi.org/10.48550/arXiv.2312.03732'>Rank-Stabilized LoRA</a> which
72
+ sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better.
73
+ Otherwise, it will use the original default value of `lora_alpha/r`.
74
+ modules_to_save (`List[str]`):
75
+ List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
76
+ init_lora_weights (`bool` | `Literal["gaussian", "loftq"]`):
77
+ How to initialize the weights of the adapter layers. Passing True (default) results in the default
78
+ initialization from the reference implementation from Microsoft. Passing 'gaussian' results in Gaussian
79
+ initialization scaled by the LoRA rank for linear and layers. Setting the initialization to False leads to
80
+ completely random initialization and is discouraged. Pass `'loftq'` to use LoftQ initialization.
81
+ layers_to_transform (`Union[List[int], int]`):
82
+ The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
83
+ that are specified in this list. If a single integer is passed, it will apply the transformations on the
84
+ layer at this index.
85
+ layers_pattern (`str`):
86
+ The layer pattern name, used only if `layers_to_transform` is different from `None`.
87
+ rank_pattern (`dict`):
88
+ The mapping from layer names or regexp expression to ranks which are different from the default rank
89
+ specified by `r`.
90
+ alpha_pattern (`dict`):
91
+ The mapping from layer names or regexp expression to alphas which are different from the default alpha
92
+ specified by `lora_alpha`.
93
+ megatron_config (`Optional[dict]`):
94
+ The TransformerConfig arguments for Megatron. It is used to create LoRA's parallel linear layer. You can
95
+ get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron.
96
+ The arguments will be used to initialize the TransformerConfig of Megatron. You need to specify this
97
+ parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron.
98
+ megatron_core (`Optional[str]`):
99
+ The core module from Megatron to use, defaults to `"megatron.core"`.
100
+ loftq_config (`Optional[LoftQConfig]`):
101
+ The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone weights
102
+ and initialize Lora layers. Also pass `init_lora_weights='loftq'`. Note that you should not pass a
103
+ quantized model in this case, as LoftQ will quantize the model itself.
104
+ use_dora (`bool`):
105
+ Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the weights
106
+ into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is
107
+ handled by a separate learnable parameter. This can improve the performance of LoRA especially at low
108
+ ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger overhead than pure
109
+ LoRA, so it is recommended to merge weights for inference. For more information, see
110
+ https://arxiv.org/abs/2402.09353.
111
+ layer_replication(`List[Tuple[int, int]]`):
112
+ Build a new stack of layers by stacking the original model layers according to the ranges specified. This
113
+ allows expanding (or shrinking) the model without duplicating the base model weights. The new layers will
114
+ all have separate LoRA adapters attached to them.
115
+ """
116
+
117
+ r: int = field(default=8, metadata={"help": "Lora attention dimension"})
118
+ target_modules: Optional[Union[list[str], str]] = field(
119
+ default=None,
120
+ metadata={
121
+ "help": (
122
+ "List of module names or regex expression of the module names to replace with LoRA."
123
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'."
124
+ "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
125
+ "If not specified, modules will be chosen according to the model architecture, If the architecture is "
126
+ "not known, an error will be raised -- in this case, you should specify the target modules manually."
127
+ ),
128
+ },
129
+ )
130
+ lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"})
131
+ lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"})
132
+ fan_in_fan_out: bool = field(
133
+ default=False,
134
+ metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
135
+ )
136
+ bias: Literal["none", "all", "lora_only"] = field(
137
+ default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"}
138
+ )
139
+ use_rslora: bool = field(
140
+ default=False,
141
+ metadata={
142
+ "help": (
143
+ "When set to True, uses Rank-Stabilized LoRA doi.org/10.48550/arXiv.2312.03732"
144
+ " which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it"
145
+ " was proven to work better. Otherwise, it will use the original default"
146
+ " value of `lora_alpha/r`."
147
+ )
148
+ },
149
+ )
150
+ modules_to_save: Optional[list[str]] = field(
151
+ default=None,
152
+ metadata={
153
+ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. "
154
+ "For example, in Sequence Classification or Token Classification tasks, "
155
+ "the final `classifier/score` layer is randomly initialized and as such needs to be trainable and saved."
156
+ },
157
+ )
158
+ init_lora_weights: bool | Literal["gaussian", "loftq"] = field(
159
+ default=True,
160
+ metadata={
161
+ "help": (
162
+ "How to initialize the weights of the LoRA layers. Passing True (default) results in the default "
163
+ "initialization from the reference implementation from Microsoft. Passing 'gaussian' results "
164
+ "in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization "
165
+ "to False leads to completely random initialization and is discouraged."
166
+ "Pass `'loftq'` to use LoftQ initialization"
167
+ ),
168
+ },
169
+ )
170
+ layers_to_transform: Optional[Union[list[int], int]] = field(
171
+ default=None,
172
+ metadata={
173
+ "help": "The layer indexes to transform. If this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. "
174
+ "This only works when target_modules is a list of str."
175
+ },
176
+ )
177
+ layers_pattern: Optional[Union[list[str], str]] = field(
178
+ default=None,
179
+ metadata={
180
+ "help": "The layer pattern name, used only if `layers_to_transform` is different from None and if the layer pattern is not in the common layers pattern."
181
+ "This only works when target_modules is a list of str."
182
+ },
183
+ )
184
+ rank_pattern: Optional[dict] = field(
185
+ default_factory=dict,
186
+ metadata={
187
+ "help": (
188
+ "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. "
189
+ "For example, `{'model.decoder.layers.0.encoder_attn.k_proj': 8}`"
190
+ )
191
+ },
192
+ )
193
+ alpha_pattern: Optional[dict] = field(
194
+ default_factory=dict,
195
+ metadata={
196
+ "help": (
197
+ "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. "
198
+ "For example, `{'model.decoder.layers.0.encoder_attn.k_proj': 32}`"
199
+ )
200
+ },
201
+ )
202
+ megatron_config: Optional[dict] = field(
203
+ default=None,
204
+ metadata={
205
+ "help": (
206
+ "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer."
207
+ "You can get it like this, `core_transformer_config_from_args(get_args())`, "
208
+ "these two functions being from Megatron."
209
+ "You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and "
210
+ "RowParallelLinear layers of megatron."
211
+ "It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` "
212
+ "functions, because TransformerConfig may not necessarily be serialized."
213
+ " When using megatron, you can instead use the `get_peft_model_state_dict` function together with "
215
+ "megatron's own framework to save and load models and configurations."
215
+ )
216
+ },
217
+ )
218
+ megatron_core: Optional[str] = field(
219
+ default="megatron.core",
220
+ metadata={
221
+ "help": (
222
+ "The core module from Megatron, it is used to create LoRA's parallel linear layer. "
223
+ "It only needs to be passed in when you need to use your own modified megatron core module. "
224
+ "Otherwise, it will use the default value `megatron.core`. "
225
+ )
226
+ },
227
+ )
228
+ # dict type is used when loading config.json
229
+ loftq_config: Union[LoftQConfig, dict] = field(
230
+ default_factory=dict,
231
+ metadata={
232
+ "help": (
233
+ "The configuration of LoftQ. If this is passed, then LoftQ will be used to quantize the backbone "
234
+ "weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case."
235
+ )
236
+ },
237
+ )
238
+ use_dora: bool = field(
239
+ default=False,
240
+ metadata={
241
+ "help": (
242
+ "Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the "
243
+ "weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the "
244
+ "magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, "
245
+ "especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger"
246
+ "overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, "
247
+ "see https://arxiv.org/abs/2402.09353."
248
+ )
249
+ },
250
+ )
251
+ # Enables replicating layers in a model to expand it to a larger model.
252
+ layer_replication: Optional[list[tuple[int, int]]] = field(
253
+ default=None,
254
+ metadata={
255
+ "help": (
256
+ "This enables using LoRA to effectively expand a transformer model to a larger size by repeating some layers. "
257
+ "The transformation handles models (currently Llama, Bert or Falcon compatible architectures) with "
258
+ "a module list in the model which it modifies to expand the number of modules. "
259
+ "Base weights are shared so the memory usage is close to the original model. The intended use is these base weights "
260
+ "remain fixed during finetuning but each layer has a separate LoRA adapter so the layers can be specialized via "
261
+ "the adapter layers fit during fine tuning."
262
+ "The format is a list of [start, end) pairs which specify the layer ranges to stack. For example:\n"
263
+ " Original model has 5 layers labelled by their position in the model: `[0, 1, 2, 3, 4]`\n"
264
+ " layer_replication: `[[0, 4], [2, 5]]`\n"
265
+ " Final model will have this arrangement of original layers: `[0, 1, 2, 3, 2, 3, 4]`\n"
266
+ "This format is based on what is used for pass-through merges in mergekit. It makes it simple to select sequential "
267
+ "ranges of a model and stack them while reusing layers at either end of each sequence."
268
+ )
269
+ },
270
+ )
271
+
272
+ def __post_init__(self):
273
+ self.peft_type = PeftType.LORA
274
+ self.target_modules = (
275
+ set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
276
+ )
277
+ # if target_modules is a regex expression, then layers_to_transform should be None
278
+ if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
279
+ raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
280
+
281
+ # if target_modules is a regex expression, then layers_pattern should be None
282
+ if isinstance(self.target_modules, str) and self.layers_pattern is not None:
283
+ raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
284
+
285
+ if self.use_dora and self.megatron_config:
286
+ raise ValueError("DoRA does not support megatron_core, please set `use_dora=False`.")
287
+
288
+ # handle init_lora_weights and loftq_config
289
+ if self.init_lora_weights == "loftq":
290
+ import importlib
291
+
292
+ if not importlib.util.find_spec("scipy"):
293
+ raise ImportError("The required package 'scipy' is not installed. Please install it to continue.")
294
+ if self.loftq_config is None:
295
+ raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.")
296
+
297
+ # convert loftq_config to dict
298
+ if self.loftq_config and not isinstance(self.loftq_config, dict):
299
+ self.loftq_config = vars(self.loftq_config)
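
A short sketch exercising several of the fields defined above; the module names are illustrative for a Llama-style architecture.

from peft import LoraConfig

config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    use_rslora=True,               # scaling becomes lora_alpha / sqrt(r)
    rank_pattern={"o_proj": 16},   # per-module rank override
    alpha_pattern={"o_proj": 32},  # per-module alpha override
)

# __post_init__ enforces the constraints documented above, e.g. combining a regex string
# for target_modules with layers_to_transform raises a ValueError:
# LoraConfig(target_modules=".*q_proj$", layers_to_transform=[0, 1])
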
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/gptq.py ADDED
@@ -0,0 +1,114 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Optional
16
+
17
+ import torch
18
+
19
+ from peft.tuners.lora.layer import LoraLayer
20
+ from peft.tuners.tuners_utils import BaseTunerLayer
21
+ from peft.utils import get_auto_gptq_quant_linear
22
+
23
+
24
+ class QuantLinear(torch.nn.Module, LoraLayer):
25
+ def __init__(
26
+ self,
27
+ base_layer,
28
+ adapter_name: str,
29
+ r: int = 0,
30
+ lora_alpha: int = 1,
31
+ lora_dropout: float = 0.0,
32
+ init_lora_weights: bool = True,
33
+ use_rslora: bool = False,
34
+ use_dora: bool = False,
35
+ **kwargs,
36
+ ):
37
+ super().__init__()
38
+ LoraLayer.__init__(self, base_layer)
39
+
40
+ if use_dora:
41
+ raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
42
+
43
+ # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
44
+ # for backwards compatibility
45
+ self.quant_linear_module = base_layer
46
+ self._active_adapter = adapter_name
47
+ self.update_layer(
48
+ adapter_name,
49
+ r,
50
+ lora_alpha=lora_alpha,
51
+ lora_dropout=lora_dropout,
52
+ init_lora_weights=init_lora_weights,
53
+ use_rslora=use_rslora,
54
+ use_dora=use_dora,
55
+ )
56
+
57
+ def forward(self, x: torch.Tensor):
58
+ # note: logic differs from default Linear because merging is not supported
59
+ result = self.quant_linear_module(x)
60
+
61
+ if self.disable_adapters:
62
+ return result
63
+
64
+ for active_adapter in self.active_adapters:
65
+ if active_adapter not in self.lora_A.keys():
66
+ continue
67
+ lora_A = self.lora_A[active_adapter]
68
+ lora_B = self.lora_B[active_adapter]
69
+ dropout = self.lora_dropout[active_adapter]
70
+ scaling = self.scaling[active_adapter]
71
+
72
+ requires_conversion = not torch.is_autocast_enabled()
73
+ if requires_conversion:
74
+ expected_dtype = result.dtype
75
+ x = x.to(lora_A.weight.dtype)
76
+
77
+ output = lora_B(lora_A(dropout(x)))
78
+ if requires_conversion:
79
+ output = output.to(expected_dtype)
80
+ output = output * scaling
81
+ result += output
82
+ return result
83
+
84
+ def __repr__(self) -> str:
85
+ rep = super().__repr__()
86
+ return "lora." + rep
87
+
88
+ # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
89
+ # def reset_lora_parameters(self, adapter_name):
90
+ # if adapter_name in self.lora_A.keys():
91
+ # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
92
+ # torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
93
+
94
+
95
+ def dispatch_gptq(
96
+ target: torch.nn.Module,
97
+ adapter_name: str,
98
+ **kwargs: Any,
99
+ ) -> Optional[torch.nn.Module]:
100
+ new_module = None
101
+
102
+ if isinstance(target, BaseTunerLayer):
103
+ target_base_layer = target.get_base_layer()
104
+ else:
105
+ target_base_layer = target
106
+
107
+ gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
108
+ AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
109
+
110
+ if AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear):
111
+ new_module = QuantLinear(target, adapter_name, **kwargs)
112
+ target.qweight = target_base_layer.qweight
113
+
114
+ return new_module
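
A hedged sketch for the GPTQ path, assuming auto-gptq is installed and an already-quantized checkpoint is used; the model id is illustrative. Note that `QuantLinear` above rejects `use_dora=True` and, like the other quantized wrappers, does not support merging.

from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7B-GPTQ",  # assumed GPTQ checkpoint
    device_map="auto",
)
config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], use_dora=False)
model = get_peft_model(base, config)  # GPTQ linears are wrapped via dispatch_gptq -> QuantLinear
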
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/layer.py ADDED
@@ -0,0 +1,1066 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import math
17
+ import warnings
18
+ from typing import Any, Optional, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ from transformers.pytorch_utils import Conv1D
24
+
25
+ from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
26
+ from peft.utils.integrations import dequantize_bnb_weight, gather_params_ctx
27
+ from peft.utils.other import transpose
28
+
29
+ from .config import LoraConfig
30
+
31
+
32
+ class LoraLayer(BaseTunerLayer):
33
+ # All names of layers that may contain (trainable) adapter weights
34
+ adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B")
35
+ # All names of other parameters that may contain adapter-related parameters
36
+ other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout")
37
+
38
+ def __init__(self, base_layer: nn.Module, **kwargs) -> None:
39
+ self.base_layer = base_layer
40
+ self.r = {}
41
+ self.lora_alpha = {}
42
+ self.scaling = {}
43
+ self.lora_dropout = nn.ModuleDict({})
44
+ self.lora_A = nn.ModuleDict({})
45
+ self.lora_B = nn.ModuleDict({})
46
+ # For Embedding layer
47
+ self.lora_embedding_A = nn.ParameterDict({})
48
+ self.lora_embedding_B = nn.ParameterDict({})
49
+ # Mark the weight as unmerged
50
+ self._disable_adapters = False
51
+ self.merged_adapters = []
52
+ self.use_dora: dict[str, bool] = {}
53
+ self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA
54
+ self._caches: dict[str, Any] = {}
55
+ self.kwargs = kwargs
56
+
57
+ base_layer = self.get_base_layer()
58
+ if isinstance(base_layer, nn.Linear):
59
+ in_features, out_features = base_layer.in_features, base_layer.out_features
60
+ elif isinstance(base_layer, nn.Conv2d):
61
+ in_features, out_features = base_layer.in_channels, base_layer.out_channels
62
+ elif isinstance(base_layer, nn.Embedding):
63
+ in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim
64
+ elif isinstance(base_layer, Conv1D):
65
+ in_features, out_features = (
66
+ base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
67
+ )
68
+ elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"):
69
+ # QuantLinear
70
+ in_features, out_features = base_layer.infeatures, base_layer.outfeatures
71
+ elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"):
72
+ # Megatron ColumnParallelLinear,RowParallelLinear
73
+ in_features, out_features = base_layer.input_size, base_layer.output_size
74
+ elif hasattr(base_layer, "codebooks") and base_layer.__class__.__name__ == "QuantizedLinear":
75
+ # AQLM QuantLinear
76
+ in_features, out_features = base_layer.in_features, base_layer.out_features
77
+ elif hasattr(base_layer, "w_bit") and base_layer.__class__.__name__ == "WQLinear_GEMM":
78
+ # Awq layers
79
+ in_features, out_features = base_layer.in_features, base_layer.out_features
80
+ else:
81
+ raise ValueError(f"Unsupported layer type {type(base_layer)}")
82
+
83
+ self.in_features = in_features
84
+ self.out_features = out_features
85
+
86
+ def update_layer(
87
+ self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool = False
88
+ ):
89
+ # This code works for linear layers, override for other layer types
90
+ if r <= 0:
91
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
92
+
93
+ self.r[adapter_name] = r
94
+ self.lora_alpha[adapter_name] = lora_alpha
95
+ if lora_dropout > 0.0:
96
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
97
+ else:
98
+ lora_dropout_layer = nn.Identity()
99
+
100
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
101
+ # Actual trainable parameters
102
+ self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False)
103
+ self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False)
104
+ if use_rslora:
105
+ self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
106
+ else:
107
+ self.scaling[adapter_name] = lora_alpha / r
108
+
109
+ if init_lora_weights == "loftq":
110
+ self.loftq_init(adapter_name)
111
+ elif init_lora_weights:
112
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
113
+
114
+ # check weight and qweight (for GPTQ)
115
+ for weight_name in ("weight", "qweight"):
116
+ weight = getattr(self.get_base_layer(), weight_name, None)
117
+ if weight is not None:
118
+ # the layer is already completely initialized, this is an update
119
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
120
+ self.to(weight.device, dtype=weight.dtype)
121
+ else:
122
+ self.to(weight.device)
123
+ break
124
+
125
+ if use_dora:
126
+ self.dora_init(adapter_name)
127
+ self.use_dora[adapter_name] = True
128
+ else:
129
+ self.use_dora[adapter_name] = False
130
+
131
+ self.set_adapter(self.active_adapters)
132
+
133
+ def reset_lora_parameters(self, adapter_name, init_lora_weights):
134
+ if init_lora_weights is False:
135
+ return
136
+
137
+ if adapter_name in self.lora_A.keys():
138
+ if init_lora_weights is True:
139
+ # initialize A the same way as the default for nn.Linear and B to zero
140
+ # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124
141
+ nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
142
+ elif init_lora_weights.lower() == "gaussian":
143
+ nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name])
144
+ else:
145
+ raise ValueError(f"Unknown initialization {init_lora_weights=}")
146
+ nn.init.zeros_(self.lora_B[adapter_name].weight)
147
+ if adapter_name in self.lora_embedding_A.keys():
148
+ # for embeddings, initialize A to zeros and B with a normal distribution (roles swapped vs. nn.Linear)
149
+ nn.init.zeros_(self.lora_embedding_A[adapter_name])
150
+ nn.init.normal_(self.lora_embedding_B[adapter_name])
151
+
152
+ def loftq_init(self, adapter_name):
153
+ from peft.utils.loftq_utils import loftq_init
154
+
155
+ weight = self.get_base_layer().weight
156
+ kwargs = {
157
+ "num_bits": self.kwargs.get("loftq_bits", 4),
158
+ "reduced_rank": self.r[adapter_name],
159
+ "num_iter": self.kwargs.get("loftq_iter", 1),
160
+ }
161
+
162
+ qweight, lora_A, lora_B = loftq_init(weight, **kwargs)
163
+ if adapter_name in self.lora_A.keys():
164
+ # assign the LoftQ-derived low-rank factors to the adapter weights
165
+ self.lora_A[adapter_name].weight.data = lora_A
166
+ self.lora_B[adapter_name].weight.data = lora_B
167
+ if adapter_name in self.lora_embedding_A.keys():
168
+ # assign the LoftQ-derived factors to the embedding adapter weights
169
+ self.lora_embedding_A[adapter_name].weight.data = lora_A
170
+ self.lora_embedding_B[adapter_name].weight.data = lora_B
171
+ self.get_base_layer().weight.data = qweight
172
+
173
+ def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
174
+ # calculate L2 norm of weight matrix, column-wise
175
+ weight = weight + scaling * lora_weight
176
+ weight_norm = torch.linalg.norm(weight, dim=1).to(weight.dtype)
177
+ return weight_norm
178
+
179
+ def dora_init(self, adapter_name: str) -> None:
180
+ lora_A = self.lora_A[adapter_name]
181
+ lora_B = self.lora_B[adapter_name]
182
+ scaling = self.scaling[adapter_name]
183
+ with gather_params_ctx(self.get_base_layer()):
184
+ weight = self.get_base_layer().weight
185
+ quant_state = getattr(self.get_base_layer(), "state", None)
186
+ weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb
187
+ if weight.data.ndim == 4: # For handling LoRAs applied to Conv2Ds.
188
+ lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1))
189
+ lora_weight = lora_weight.reshape(weight.shape)
190
+ else:
191
+ lora_weight = lora_B.weight @ lora_A.weight
192
+ weight_norm = self._get_weight_norm(weight, lora_weight, scaling)
193
+ self.lora_magnitude_vector = nn.ParameterDict()
194
+ self.lora_magnitude_vector[adapter_name] = nn.Parameter(weight_norm, requires_grad=True)
195
+ # add lora_magnitude_vector to the list of learnable parameters
196
+ self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",)
197
+
198
+ def _cache_store(self, key: str, value: Any) -> None:
199
+ self._caches[key] = value
200
+
201
+ def _cache_pop(self, key: str) -> Any:
202
+ value = self._caches.pop(key)
203
+ return value
204
+
205
+ def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter):
206
+ """
207
+ For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer
208
+ output.
209
+ """
210
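+ # In short: with m = magnitude and c = ||W + scaling * B @ A||_c (detached), this returns
+ # (m / c - 1) * (x @ W^T) + (m / c) * lora_B(lora_A(x)) * scaling, which the caller adds to the base layer output.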
+ lora_weight = lora_B.weight @ lora_A.weight
211
+ magnitude = self.lora_magnitude_vector[active_adapter]
212
+ weight = self.get_base_layer().weight
213
+ quant_state = getattr(self.get_base_layer(), "state", None)
214
+ weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb
215
+ weight = weight.to(x.dtype)
216
+ weight_norm = self._get_weight_norm(weight, lora_weight, scaling)
217
+ # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353)
218
+ # "[...] we suggest treating ||V +∆V ||_c in
219
+ # Eq. (5) as a constant, thereby detaching it from the gradient
220
+ # graph. This means that while ||V + ∆V ||_c dynamically
221
+ # reflects the updates of ∆V , it won’t receive any gradient
222
+ # during backpropagation"
223
+ weight_norm = weight_norm.detach()
224
+ mag_norm_scale = (magnitude / weight_norm).view(1, -1)
225
+ result_dora = (mag_norm_scale - 1) * (
226
+ F.linear(x, transpose(weight, self.fan_in_fan_out))
227
+ ) + mag_norm_scale * lora_B(lora_A(x)) * scaling
228
+
229
+ # Note: Computation could potentially be accelerated by using the code below instead of calculating X@W again.
230
+ # This is only correct if dropout=0, otherwise results will differ:
231
+ # https://github.com/huggingface/peft/pull/1474#issuecomment-1964682771
232
+ # bias = self.get_base_layer().bias
233
+ # if bias is not None:
234
+ # result = result - bias
235
+ # result = mag_norm_scale * result + mag_norm_scale * lora_B(lora_A(x)) * scaling
236
+ # if bias is not None:
237
+ # result = result + bias
238
+
239
+ return result_dora
240
+
241
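+ # Scaling helpers (descriptive comment): the effective LoRA scaling defaults to lora_alpha / r (or
+ # lora_alpha / sqrt(r) with rsLoRA). set_scale sets it to scale * lora_alpha / r, scale_layer multiplies the
+ # current value by `scale`, and unscale_layer(None) resets it to lora_alpha / r.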
+ def set_scale(self, adapter, scale):
242
+ if adapter not in self.scaling:
243
+ # Ignore the case where the adapter is not in the layer
244
+ return
245
+ self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter]
246
+
247
+ def scale_layer(self, scale: float) -> None:
248
+ if scale == 1:
249
+ return
250
+
251
+ for active_adapter in self.active_adapters:
252
+ if active_adapter not in self.lora_A.keys():
253
+ continue
254
+
255
+ self.scaling[active_adapter] *= scale
256
+
257
+ def unscale_layer(self, scale=None) -> None:
258
+ for active_adapter in self.active_adapters:
259
+ if active_adapter not in self.lora_A.keys():
260
+ continue
261
+
262
+ if scale is None:
263
+ self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter]
264
+ else:
265
+ self.scaling[active_adapter] /= scale
266
+
267
+ def _check_forward_args(self, x, *args, **kwargs):
268
+ """Check if the arguments are compatible with the configs and state of the model"""
269
+ adapter_names = kwargs.get("adapter_names", None)
270
+ if adapter_names is None:
271
+ return
272
+
273
+ if len(x) != len(adapter_names):
274
+ msg = (
275
+ "Length of `adapter_names` should be the same as the number of inputs, but got "
276
+ f"{len(adapter_names)} and {len(x)} respectively."
277
+ )
278
+ raise ValueError(msg)
279
+
280
+ if self.merged:
281
+ # It is unclear what would be the right thing to do if users pass adapter_names and there are merged
282
+ # adapters. Therefore, it is better to raise an error in this case.
283
+ msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
284
+ raise ValueError(msg)
285
+
286
+ unique_adapters = set(self.active_adapters)
287
+ for adapter_name in unique_adapters:
288
+ if self.use_dora.get(adapter_name, False):
289
+ msg = "Cannot pass `adapter_names` when DoRA is enabled."
290
+ raise ValueError(msg)
291
+
292
+ def _mixed_batch_forward(
293
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
294
+ ) -> torch.Tensor:
295
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
296
+ # extra argument that allows mixing different adapters in the same batch at inference time.
297
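+ # For example, adapter_names=["adapter_a", "__base__", "adapter_b"] routes the first sample through
+ # "adapter_a", the second through the base layer only, and the third through "adapter_b".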
+ result = self.base_layer(x, *args, **kwargs)
298
+ torch_result_dtype = result.dtype
299
+
300
+ unique_adapters = set(adapter_names)
301
+ sub_batch_indices_list = []
302
+ for adapter in unique_adapters:
303
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
304
+
305
+ for i, active_adapter in enumerate(unique_adapters):
306
+ if active_adapter == "__base__":
307
+ continue
308
+ if active_adapter not in self.lora_A.keys():
309
+ continue
310
+
311
+ lora_A = self.lora_A[active_adapter]
312
+ lora_B = self.lora_B[active_adapter]
313
+ dropout = self.lora_dropout[active_adapter]
314
+ scaling = self.scaling[active_adapter]
315
+
316
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
317
+ # layer output
318
+ sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype)
319
+ lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling
320
+ result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype)
321
+
322
+ return result
323
+
324
+
325
+ # Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
326
+ # and modified to work with PyTorch FSDP
327
+
328
+
329
+ # ------------------------------------------------------------------------------------------
330
+ # Copyright (c) Microsoft Corporation. All rights reserved.
331
+ # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
332
+ # ------------------------------------------------------------------------------------------
333
+
334
+
335
+ class Linear(nn.Module, LoraLayer):
336
+ # Lora implemented in a dense layer
337
+ def __init__(
338
+ self,
339
+ base_layer,
340
+ adapter_name: str,
341
+ r: int = 0,
342
+ lora_alpha: int = 1,
343
+ lora_dropout: float = 0.0,
344
+ fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
345
+ is_target_conv_1d_layer: bool = False,
346
+ init_lora_weights: Union[bool, str] = True,
347
+ use_rslora: bool = False,
348
+ use_dora: bool = False,
349
+ **kwargs,
350
+ ) -> None:
351
+ super().__init__()
352
+ LoraLayer.__init__(self, base_layer, **kwargs)
353
+ self.fan_in_fan_out = fan_in_fan_out
354
+
355
+ self._active_adapter = adapter_name
356
+ self.update_layer(
357
+ adapter_name,
358
+ r,
359
+ lora_alpha=lora_alpha,
360
+ lora_dropout=lora_dropout,
361
+ init_lora_weights=init_lora_weights,
362
+ use_rslora=use_rslora,
363
+ use_dora=use_dora,
364
+ )
365
+ self.is_target_conv_1d_layer = is_target_conv_1d_layer
366
+
367
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
368
+ """
369
+ Merge the active adapter weights into the base weights
370
+
371
+ Args:
372
+ safe_merge (`bool`, *optional*):
373
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
374
+ before merging the weights. This is useful if you want to check if the merge operation will produce
375
+ NaNs. Defaults to `False`.
376
+ adapter_names (`list[str]`, *optional*):
377
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
378
+ to `None`.
379
+ """
380
+ adapter_names = check_adapters_to_merge(self, adapter_names)
381
+ if not adapter_names:
382
+ # no adapter to merge
383
+ return
384
+
385
+ for active_adapter in adapter_names:
386
+ if active_adapter in self.lora_A.keys():
387
+ base_layer = self.get_base_layer()
388
+ if safe_merge:
389
+ # Note that safe_merge will be slower than the normal merge
390
+ # because of the copy operation.
391
+ orig_weights = base_layer.weight.data.clone()
392
+ delta_weight = self.get_delta_weight(active_adapter)
393
+ if not self.use_dora[active_adapter]:
394
+ orig_weights = orig_weights + delta_weight
395
+ else:
396
+ # handle dora
397
+ # since delta_weight already includes scaling, set it to 1 here
398
+ weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach()
399
+ # We need to cache weight_norm because it has to be based on the original weights. We
400
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
401
+ # different value
402
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
403
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
404
+ orig_weights = dora_factor.view(-1, 1) * (orig_weights + delta_weight)
405
+
406
+ if not torch.isfinite(orig_weights).all():
407
+ raise ValueError(
408
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
409
+ )
410
+
411
+ base_layer.weight.data = orig_weights
412
+ else:
413
+ delta_weight = self.get_delta_weight(active_adapter)
414
+ if not self.use_dora[active_adapter]:
415
+ base_layer.weight.data = base_layer.weight.data + delta_weight
416
+ else:
417
+ # handle dora
418
+ # since delta_weight already includes scaling, set it to 1 here
419
+ weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach()
420
+ # We need to cache weight_norm because it has to be based on the original weights. We
421
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
422
+ # different value
423
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
424
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
425
+ new_weight = dora_factor.view(-1, 1) * (base_layer.weight.data + delta_weight)
426
+ base_layer.weight.data = new_weight
427
+
428
+ self.merged_adapters.append(active_adapter)
429
+
430
+ def unmerge(self) -> None:
431
+ """
432
+ This method unmerges all merged adapter layers from the base weights.
433
+ """
434
+ if not self.merged:
435
+ warnings.warn("Already unmerged. Nothing to do.")
436
+ return
437
+ while len(self.merged_adapters) > 0:
438
+ active_adapter = self.merged_adapters.pop()
439
+ if active_adapter in self.lora_A.keys():
440
+ weight = self.get_base_layer().weight
441
+ delta_weight = self.get_delta_weight(active_adapter)
442
+ if not self.use_dora[active_adapter]:
443
+ weight.data -= delta_weight
444
+ else:
445
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
446
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
447
+ weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight
448
+ weight.data = weight_orig
449
+
450
+ def get_delta_weight(self, adapter) -> torch.Tensor:
451
+ """
452
+ Compute the delta weight for the given adapter.
453
+
454
+ Args:
455
+ adapter (str):
456
+ The name of the adapter for which the delta weight should be computed.
457
+ """
458
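+ # The delta is transpose(lora_B.weight @ lora_A.weight, fan_in_fan_out) * scaling, i.e. exactly the update
+ # that merge() adds to the base weight.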
+ device = self.lora_B[adapter].weight.device
459
+ dtype = self.lora_B[adapter].weight.dtype
460
+
461
+ # In case users want to merge the adapter weights that are in
462
+ # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
463
+ # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16.
464
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16
465
+
466
+ weight_A = self.lora_A[adapter].weight
467
+ weight_B = self.lora_B[adapter].weight
468
+
469
+ if cast_to_fp32:
470
+ weight_A = weight_A.float()
471
+ weight_B = weight_B.float()
472
+
473
+ output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter]
474
+
475
+ if cast_to_fp32:
476
+ output_tensor = output_tensor.to(dtype=dtype)
477
+
478
+ # cast back the weights
479
+ self.lora_A[adapter].weight.data = weight_A.to(dtype)
480
+ self.lora_B[adapter].weight.data = weight_B.to(dtype)
481
+
482
+ return output_tensor
483
+
484
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
485
+ self._check_forward_args(x, *args, **kwargs)
486
+ adapter_names = kwargs.pop("adapter_names", None)
487
+
488
+ if self.disable_adapters:
489
+ if self.merged:
490
+ self.unmerge()
491
+ result = self.base_layer(x, *args, **kwargs)
492
+ elif adapter_names is not None:
493
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
494
+ elif self.merged:
495
+ result = self.base_layer(x, *args, **kwargs)
496
+ else:
497
+ result = self.base_layer(x, *args, **kwargs)
498
+ torch_result_dtype = result.dtype
499
+ for active_adapter in self.active_adapters:
500
+ if active_adapter not in self.lora_A.keys():
501
+ continue
502
+ lora_A = self.lora_A[active_adapter]
503
+ lora_B = self.lora_B[active_adapter]
504
+ dropout = self.lora_dropout[active_adapter]
505
+ scaling = self.scaling[active_adapter]
506
+ x = x.to(lora_A.weight.dtype)
507
+
508
+ if not self.use_dora[active_adapter]:
509
+ result = result + lora_B(lora_A(dropout(x))) * scaling
510
+ else:
511
+ x = dropout(x)
512
+ result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
513
+
514
+ result = result.to(torch_result_dtype)
515
+
516
+ return result
517
+
518
+ def __repr__(self) -> str:
519
+ rep = super().__repr__()
520
+ return "lora." + rep
521
+
522
+
523
+ class Embedding(nn.Module, LoraLayer):
524
+ # LoRA implemented in a Embedding layer
525
+ def __init__(
526
+ self,
527
+ base_layer: nn.Module,
528
+ adapter_name: str,
529
+ r: int = 0,
530
+ lora_alpha: int = 1,
531
+ lora_dropout: float = 0.0,
532
+ init_lora_weights: Union[bool, str] = True,
533
+ use_rslora: bool = False,
534
+ use_dora: bool = False,
535
+ **kwargs,
536
+ ) -> None:
537
+ super().__init__()
538
+ LoraLayer.__init__(self, base_layer)
539
+
540
+ if use_dora:
541
+ raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
542
+
543
+ self._active_adapter = adapter_name
544
+ self.update_layer(
545
+ adapter_name,
546
+ r,
547
+ lora_alpha=lora_alpha,
548
+ lora_dropout=lora_dropout,
549
+ init_lora_weights=init_lora_weights,
550
+ use_rslora=use_rslora,
551
+ use_dora=use_dora,
552
+ )
553
+
554
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
555
+ if r <= 0:
556
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
557
+
558
+ self.r[adapter_name] = r
559
+ self.lora_alpha[adapter_name] = lora_alpha
560
+ if lora_dropout > 0.0:
561
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
562
+ else:
563
+ lora_dropout_layer = nn.Identity()
564
+
565
+ self.lora_dropout[adapter_name] = lora_dropout_layer
566
+ # Actual trainable parameters
567
+ weight_A = torch.randn((r, self.in_features))
568
+ weight_B = torch.randn((self.out_features, r))
569
+ self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A)
570
+ self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B)
571
+ if use_rslora:
572
+ self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
573
+ else:
574
+ self.scaling[adapter_name] = lora_alpha / r
575
+
576
+ if init_lora_weights == "loftq":
577
+ self.loftq_init(adapter_name)
578
+ elif init_lora_weights:
579
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
580
+
581
+ base_layer = self.get_base_layer()
582
+ weight = getattr(base_layer, "weight", None)
583
+ if weight is not None:
584
+ # the layer is already completely initialized, this is an update
585
+ self.to(base_layer.weight.device, dtype=weight.dtype)
586
+
587
+ self.set_adapter(self.active_adapters)
588
+
589
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
590
+ """
591
+ Merge the active adapter weights into the base weights
592
+
593
+ Args:
594
+ safe_merge (`bool`, *optional*):
595
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
596
+ before merging the weights. This is useful if you want to check if the merge operation will produce
597
+ NaNs. Defaults to `False`.
598
+ adapter_names (`list[str]`, *optional*):
599
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
600
+ to `None`.
601
+ """
602
+ adapter_names = check_adapters_to_merge(self, adapter_names)
603
+ if not adapter_names:
604
+ # no adapter to merge
605
+ return
606
+
607
+ for active_adapter in adapter_names:
608
+ if active_adapter in self.lora_embedding_A.keys():
609
+ base_layer = self.get_base_layer()
610
+ if safe_merge:
611
+ # Note that safe_merge will be slower than the normal merge
612
+ # because of the copy operation.
613
+ orig_weights = base_layer.weight.data.clone()
614
+ orig_weights = orig_weights + self.get_delta_weight(active_adapter)
615
+
616
+ if not torch.isfinite(orig_weights).all():
617
+ raise ValueError(
618
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
619
+ )
620
+
621
+ base_layer.weight.data = orig_weights
622
+ else:
623
+ base_layer.weight.data = base_layer.weight.data + self.get_delta_weight(active_adapter)
624
+ self.merged_adapters.append(active_adapter)
625
+
626
+ def unmerge(self) -> None:
627
+ """
628
+ This method unmerges all merged adapter layers from the base weights.
629
+ """
630
+ if not self.merged:
631
+ warnings.warn("Already unmerged. Nothing to do.")
632
+ return
633
+ while len(self.merged_adapters) > 0:
634
+ active_adapter = self.merged_adapters.pop()
635
+ if active_adapter in self.lora_embedding_A.keys():
636
+ self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
637
+
638
+ def get_delta_weight(self, adapter) -> torch.Tensor:
639
+ """
640
+ Compute the delta weight for the given adapter.
641
+
642
+ Args:
643
+ adapter (str):
644
+ The name of the adapter for which the delta weight should be computed.
645
+ """
646
+ device = self.lora_embedding_B[adapter].device
647
+ dtype = self.lora_embedding_A[adapter].dtype
648
+
649
+ # In case users want to merge the adapter weights that are in
650
+ # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
651
+ # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16.
652
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16
653
+
654
+ weight_A = self.lora_embedding_A[adapter]
655
+ weight_B = self.lora_embedding_B[adapter]
656
+
657
+ if cast_to_fp32:
658
+ weight_A = weight_A.float()
659
+ weight_B = weight_B.float()
660
+
661
+ output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter]
662
+
663
+ if cast_to_fp32:
664
+ output_tensor = output_tensor.to(dtype=dtype)
665
+
666
+ # cast back the weights
667
+ self.lora_embedding_A[adapter] = weight_A.to(dtype)
668
+ self.lora_embedding_B[adapter] = weight_B.to(dtype)
669
+
670
+ return output_tensor
671
+
672
+ def _mixed_batch_forward(
673
+ self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
674
+ ) -> torch.Tensor:
675
+ # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
676
+ # extra argument that allows mixing different adapters in the same batch at inference time.
677
+ result = self.base_layer(x, *args, **kwargs)
678
+
679
+ unique_adapters = set(adapter_names)
680
+ sub_batch_indices_list = []
681
+ for adapter in unique_adapters:
682
+ sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
683
+
684
+ for i, active_adapter in enumerate(unique_adapters):
685
+ if active_adapter == "__base__":
686
+ continue
687
+ if active_adapter not in self.lora_embedding_A.keys():
688
+ continue
689
+
690
+ embedding_A = self.lora_embedding_A[active_adapter].T
691
+ embedding_B = self.lora_embedding_B[active_adapter].T
692
+ scaling = self.scaling[active_adapter]
693
+
694
+ # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
695
+ # layer output
696
+ sub_batch = x[sub_batch_indices_list[i]]
697
+ after_A = self._embed(sub_batch, embedding_A)
698
+ result[sub_batch_indices_list[i]] += (after_A @ embedding_B) * scaling
699
+
700
+ return result
701
+
702
+ def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
703
+ base_layer = self.get_base_layer()
704
+ return F.embedding(
705
+ input,
706
+ weight,
707
+ padding_idx=base_layer.padding_idx,
708
+ max_norm=base_layer.max_norm,
709
+ norm_type=base_layer.norm_type,
710
+ scale_grad_by_freq=base_layer.scale_grad_by_freq,
711
+ sparse=base_layer.sparse,
712
+ )
713
+
714
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
715
+ # TODO: no dtype conversion here, unlike in Linear, is that correct?
716
+ self._check_forward_args(x, *args, **kwargs)
717
+ adapter_names = kwargs.pop("adapter_names", None)
718
+
719
+ if self.disable_adapters:
720
+ if self.merged:
721
+ self.unmerge()
722
+ result = self.base_layer(x, *args, **kwargs)
723
+ elif adapter_names is not None:
724
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
725
+ elif self.merged:
726
+ result = self.base_layer(x, *args, **kwargs)
727
+ else:
728
+ result = self.base_layer(x, *args, **kwargs)
729
+ torch_result_dtype = result.dtype
730
+ for active_adapter in self.active_adapters:
731
+ if active_adapter not in self.lora_embedding_A:
732
+ continue
733
+ embedding_A = self.lora_embedding_A[active_adapter].T
734
+ embedding_B = self.lora_embedding_B[active_adapter].T
735
+ scaling = self.scaling[active_adapter]
736
+ after_A = self._embed(x, embedding_A)
737
+ result = result + (after_A @ embedding_B) * scaling
738
+ result = result.to(torch_result_dtype)
739
+
740
+ return result
741
+
742
+ def __repr__(self) -> str:
743
+ rep = super().__repr__()
744
+ return "lora." + rep
745
+
746
+
747
+ class Conv2d(nn.Module, LoraLayer):
748
+ # Lora implemented in a conv2d layer
749
+ def __init__(
750
+ self,
751
+ base_layer: nn.Module,
752
+ adapter_name: str,
753
+ r: int = 0,
754
+ lora_alpha: int = 1,
755
+ lora_dropout: float = 0.0,
756
+ init_lora_weights: Union[bool, str] = True,
757
+ use_rslora: bool = False,
758
+ use_dora: bool = False,
759
+ **kwargs,
760
+ ) -> None:
761
+ super().__init__()
762
+ LoraLayer.__init__(self, base_layer)
763
+
764
+ self._active_adapter = adapter_name
765
+ self.update_layer(
766
+ adapter_name,
767
+ r,
768
+ lora_alpha=lora_alpha,
769
+ lora_dropout=lora_dropout,
770
+ init_lora_weights=init_lora_weights,
771
+ use_rslora=use_rslora,
772
+ use_dora=use_dora,
773
+ )
774
+
775
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
776
+ if r <= 0:
777
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
778
+
779
+ self.r[adapter_name] = r
780
+ self.lora_alpha[adapter_name] = lora_alpha
781
+ if lora_dropout > 0.0:
782
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
783
+ else:
784
+ lora_dropout_layer = nn.Identity()
785
+
786
+ self.lora_dropout[adapter_name] = lora_dropout_layer
787
+ # Actual trainable parameters
788
+ base_layer = self.get_base_layer()
789
+ kernel_size = base_layer.kernel_size
790
+ stride = base_layer.stride
791
+ padding = base_layer.padding
792
+ self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False)
793
+ self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False)
794
+ if use_rslora:
795
+ self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
796
+ else:
797
+ self.scaling[adapter_name] = lora_alpha / r
798
+
799
+ if init_lora_weights == "loftq":
800
+ self.loftq_init(adapter_name)
801
+ elif init_lora_weights:
802
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
803
+
804
+ weight = getattr(base_layer, "weight", None)
805
+ if weight is not None:
806
+ # the layer is already completely initialized, this is an update
807
+ self.to(base_layer.weight.device, dtype=weight.dtype)
808
+
809
+ if use_dora:
810
+ self.dora_init(adapter_name)
811
+ self.use_dora[adapter_name] = True
812
+ else:
813
+ self.use_dora[adapter_name] = False
814
+
815
+ self.set_adapter(self.active_adapters)
816
+
817
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
818
+ """
819
+ Merge the active adapter weights inside the base weights
820
+
821
+ Args:
822
+ safe_merge (`bool`, *optional*):
823
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
824
+ before merging the weights. This is useful if you want to check if the merge operation will produce
825
+ NaNs. Defaults to `False`.
826
+ adapter_names (`list[str]`, *optional*):
827
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
828
+ to `None`.
829
+ """
830
+ adapter_names = check_adapters_to_merge(self, adapter_names)
831
+ if not adapter_names:
832
+ # no adapter to merge
833
+ return
834
+
835
+ for active_adapter in adapter_names:
836
+ if active_adapter in self.lora_A.keys():
837
+ base_layer = self.get_base_layer()
838
+ if safe_merge:
839
+ # Note that safe_merge will be slower than the normal merge
840
+ # because of the copy operation.
841
+ orig_weights = base_layer.weight.data.clone()
842
+ delta_weight = self.get_delta_weight(active_adapter)
843
+
844
+ if not self.use_dora[active_adapter]:
845
+ orig_weights = orig_weights + delta_weight
846
+ else:
847
+ # handle dora
848
+ # since delta_weight already includes scaling, set it to 1 here
849
+ weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach()
850
+ # We need to cache weight_norm because it has to be based on the original weights. We
851
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
852
+ # different value
853
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
854
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
855
+ orig_weights = dora_factor.view(-1, 1, 1, 1) * (orig_weights + delta_weight)
856
+
857
+ if not torch.isfinite(orig_weights).all():
858
+ raise ValueError(
859
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
860
+ )
861
+ base_layer.weight.data = orig_weights
862
+ else:
863
+ delta_weight = self.get_delta_weight(active_adapter)
864
+ if not self.use_dora[active_adapter]:
865
+ base_layer.weight.data = base_layer.weight.data + delta_weight
866
+ else:
867
+ # handle dora
868
+ # since delta_weight already includes scaling, set it to 1 here
869
+ weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach()
870
+ # We need to cache weight_norm because it has to be based on the original weights. We
871
+ # cannot calculate it on the fly based on the merged weights when unmerging because it's a
872
+ # different value
873
+ self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
874
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
875
+ new_weight = dora_factor.view(-1, 1, 1, 1) * (base_layer.weight.data + delta_weight)
876
+ base_layer.weight.data = new_weight
877
+
878
+ self.merged_adapters.append(active_adapter)
879
+
880
+ def unmerge(self) -> None:
881
+ """
882
+ This method unmerges all merged adapter layers from the base weights.
883
+ """
884
+ if not self.merged:
885
+ warnings.warn("Already unmerged. Nothing to do.")
886
+ return
887
+ while len(self.merged_adapters) > 0:
888
+ active_adapter = self.merged_adapters.pop()
889
+ if active_adapter in self.lora_A.keys():
890
+ weight = self.get_base_layer().weight
891
+ delta_weight = self.get_delta_weight(active_adapter)
892
+ if not self.use_dora[active_adapter]:
893
+ weight.data -= delta_weight
894
+ else:
895
+ weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
896
+ dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
897
+ weight_orig = weight.data / dora_factor.view(-1, 1, 1, 1) - delta_weight
898
+ weight.data = weight_orig
899
+
900
+ def get_delta_weight(self, adapter) -> torch.Tensor:
901
+ """
902
+ Compute the delta weight for the given adapter.
903
+
904
+ Args:
905
+ adapter (str):
906
+ The name of the adapter for which the delta weight should be computed.
907
+ """
908
+ device = self.lora_B[adapter].weight.device
909
+ dtype = self.lora_A[adapter].weight.dtype
910
+
911
+ # In case users want to merge the adapter weights that are in
912
+ # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
913
+ # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16.
914
+ cast_to_fp32 = device.type == "cpu" and dtype == torch.float16
915
+
916
+ weight_A = self.lora_A[adapter].weight
917
+ weight_B = self.lora_B[adapter].weight
918
+
919
+ if cast_to_fp32:
920
+ weight_A = weight_A.float()
921
+ weight_B = weight_B.float()
922
+
923
+ # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117
924
+ if self.get_base_layer().weight.size()[2:4] == (1, 1):
925
+ # conv2d 1x1
926
+ output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(
927
+ 3
928
+ ) * self.scaling[adapter]
929
+ else:
930
+ # conv2d 3x3
931
+ output_tensor = (
932
+ F.conv2d(
933
+ weight_A.permute(1, 0, 2, 3),
934
+ weight_B,
935
+ ).permute(1, 0, 2, 3)
936
+ * self.scaling[adapter]
937
+ )
938
+
939
+ if cast_to_fp32:
940
+ output_tensor = output_tensor.to(dtype=dtype)
941
+
942
+ # cast back the weights
943
+ self.lora_A[adapter].weight.data = weight_A.to(dtype)
944
+ self.lora_B[adapter].weight.data = weight_B.to(dtype)
945
+
946
+ return output_tensor
947
+
948
+ def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
949
+ # calculate L2 norm of weight matrix, channel-wise
950
+ weight = weight + scaling * lora_weight
951
+ # the following is needed to have compatibility with the 4D weight tensors of Conv2D
952
+ weight_norm = weight.norm(p=2, dim=(1, 2, 3), keepdim=True).transpose(1, 0)
953
+ return weight_norm
954
+
955
+ def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter):
956
+ """
957
+ For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer
958
+ output.
959
+ """
960
+ base_layer = self.get_base_layer()
961
+ weight = base_layer.weight
962
+ lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1))
963
+ lora_weight = lora_weight.reshape(weight.shape)
964
+ magnitude = self.lora_magnitude_vector[active_adapter]
965
+ weight_norm = self._get_weight_norm(weight, lora_weight, scaling)
966
+ # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353)
967
+ # "[...] we suggest treating ||V +∆V ||_c in
968
+ # Eq. (5) as a constant, thereby detaching it from the gradient
969
+ # graph. This means that while ||V + ∆V ||_c dynamically
970
+ # reflects the updates of ∆V , it won’t receive any gradient
971
+ # during backpropagation"
972
+ weight_norm = weight_norm.detach()
973
+ mag_norm_scale = magnitude / weight_norm
974
+ result_dora = (mag_norm_scale - 1) * (
975
+ F.conv2d(
976
+ x,
977
+ weight,
978
+ bias=None,
979
+ stride=base_layer.stride,
980
+ padding=base_layer.padding,
981
+ dilation=base_layer.dilation,
982
+ groups=base_layer.groups,
983
+ )
984
+ ) + mag_norm_scale * lora_B(lora_A(x)) * scaling
985
+
986
+ return result_dora
987
+
988
+ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
989
+ self._check_forward_args(x, *args, **kwargs)
990
+ adapter_names = kwargs.pop("adapter_names", None)
991
+
992
+ if self.disable_adapters:
993
+ if self.merged:
994
+ self.unmerge()
995
+ result = self.base_layer(x, *args, **kwargs)
996
+ elif adapter_names is not None:
997
+ result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
998
+ elif self.merged:
999
+ result = self.base_layer(x, *args, **kwargs)
1000
+ else:
1001
+ result = self.base_layer(x, *args, **kwargs)
1002
+ torch_result_dtype = result.dtype
1003
+
1004
+ for active_adapter in self.active_adapters:
1005
+ if active_adapter not in self.lora_A.keys():
1006
+ continue
1007
+ lora_A = self.lora_A[active_adapter]
1008
+ lora_B = self.lora_B[active_adapter]
1009
+ dropout = self.lora_dropout[active_adapter]
1010
+ scaling = self.scaling[active_adapter]
1011
+ x = x.to(lora_A.weight.dtype)
1012
+
1013
+ if not self.use_dora[active_adapter]:
1014
+ result = result + lora_B(lora_A(dropout(x))) * scaling
1015
+ else:
1016
+ x = dropout(x)
1017
+ result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
1018
+
1019
+ result = result.to(torch_result_dtype)
1020
+ return result
1021
+
1022
+ def __repr__(self) -> str:
1023
+ rep = super().__repr__()
1024
+ return "lora." + rep
1025
+
1026
+
1027
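+ # Default dispatcher (consulted after the quantization-specific dispatchers): wraps torch.nn.Embedding,
+ # torch.nn.Conv2d, torch.nn.Linear and transformers' Conv1D in the corresponding LoRA layer, and returns None
+ # when the base layer type is not handled here.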
+ def dispatch_default(
1028
+ target: torch.nn.Module,
1029
+ adapter_name: str,
1030
+ lora_config: LoraConfig,
1031
+ **kwargs,
1032
+ ) -> Optional[torch.nn.Module]:
1033
+ new_module = None
1034
+
1035
+ if isinstance(target, BaseTunerLayer):
1036
+ target_base_layer = target.get_base_layer()
1037
+ else:
1038
+ target_base_layer = target
1039
+
1040
+ if isinstance(target_base_layer, torch.nn.Embedding):
1041
+ embedding_kwargs = kwargs.copy()
1042
+ embedding_kwargs.pop("fan_in_fan_out", None)
1043
+ embedding_kwargs.update(lora_config.loftq_config)
1044
+ new_module = Embedding(target, adapter_name, **embedding_kwargs)
1045
+ elif isinstance(target_base_layer, torch.nn.Conv2d):
1046
+ kwargs.update(lora_config.loftq_config)
1047
+ new_module = Conv2d(target, adapter_name, **kwargs)
1048
+ elif isinstance(target_base_layer, torch.nn.Linear):
1049
+ if kwargs["fan_in_fan_out"]:
1050
+ warnings.warn(
1051
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
1052
+ "Setting fan_in_fan_out to False."
1053
+ )
1054
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
1055
+ kwargs.update(lora_config.loftq_config)
1056
+ new_module = Linear(target, adapter_name, **kwargs)
1057
+ elif isinstance(target_base_layer, Conv1D):
1058
+ if not kwargs["fan_in_fan_out"]:
1059
+ warnings.warn(
1060
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True."
1061
+ )
1062
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
1063
+ kwargs.update(lora_config.loftq_config)
1064
+ new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs)
1065
+
1066
+ return new_module
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/model.py ADDED
@@ -0,0 +1,793 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import math
17
+ import operator
18
+ import re
19
+ import warnings
20
+ from contextlib import contextmanager
21
+ from dataclasses import asdict, replace
22
+ from enum import Enum
23
+ from functools import partial, reduce
24
+ from itertools import chain
25
+ from typing import Literal, Optional
26
+
27
+ import torch
28
+ from torch import nn
29
+ from tqdm import tqdm
30
+
31
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
32
+ from peft.tuners.tuners_utils import (
33
+ BaseTuner,
34
+ BaseTunerLayer,
35
+ check_target_module_exists,
36
+ onload_layer,
37
+ replicate_layers,
38
+ )
39
+ from peft.utils import (
40
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
41
+ ModulesToSaveWrapper,
42
+ _freeze_adapter,
43
+ _get_submodules,
44
+ get_quantization_config,
45
+ )
46
+ from peft.utils.merge_utils import dare_linear, dare_ties, magnitude_prune, task_arithmetic, ties
47
+
48
+ from .aqlm import dispatch_aqlm
49
+ from .awq import dispatch_awq
50
+ from .config import LoraConfig
51
+ from .gptq import dispatch_gptq
52
+ from .layer import Conv2d, LoraLayer, dispatch_default
53
+ from .tp_layer import dispatch_megatron
54
+
55
+
56
+ def _adapter_names_pre_forward_hook(target, args, kwargs, adapter_names):
57
+ # pre-forward hook to inject the adapter_names argument when using mixed adapter batches inference
58
+ kwargs["adapter_names"] = adapter_names
59
+ return args, kwargs
60
+
61
+
62
+ class LoraModel(BaseTuner):
63
+ """
64
+ Creates Low Rank Adapter (LoRA) model from a pretrained transformers model.
65
+
66
+ The method is described in detail in https://arxiv.org/abs/2106.09685.
67
+
68
+ Args:
69
+ model ([`torch.nn.Module`]): The model to be adapted.
70
+ config ([`LoraConfig`]): The configuration of the Lora model.
71
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
72
+
73
+ Returns:
74
+ `torch.nn.Module`: The Lora model.
75
+
76
+ Example:
77
+
78
+ ```py
79
+ >>> from transformers import AutoModelForSeq2SeqLM
80
+ >>> from peft import LoraModel, LoraConfig
81
+
82
+ >>> config = LoraConfig(
83
+ ... task_type="SEQ_2_SEQ_LM",
84
+ ... r=8,
85
+ ... lora_alpha=32,
86
+ ... target_modules=["q", "v"],
87
+ ... lora_dropout=0.01,
88
+ ... )
89
+
90
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
91
+ >>> lora_model = LoraModel(model, config, "default")
92
+ ```
93
+
94
+ ```py
95
+ >>> import torch
96
+ >>> import transformers
97
+ >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
98
+
99
+ >>> rank = ...
100
+ >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"]
101
+ >>> config = LoraConfig(
102
+ ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM"
103
+ ... )
104
+ >>> quantization_config = transformers.BitsAndBytesConfig(load_in_8bit=True)
105
+
106
+ >>> tokenizer = transformers.AutoTokenizer.from_pretrained(
107
+ ... "kakaobrain/kogpt",
108
+ ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b
109
+ ... bos_token="[BOS]",
110
+ ... eos_token="[EOS]",
111
+ ... unk_token="[UNK]",
112
+ ... pad_token="[PAD]",
113
+ ... mask_token="[MASK]",
114
+ ... )
115
+ >>> model = transformers.GPTJForCausalLM.from_pretrained(
116
+ ... "kakaobrain/kogpt",
117
+ ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b
118
+ ... pad_token_id=tokenizer.eos_token_id,
119
+ ... use_cache=False,
120
+ ... device_map={"": rank},
121
+ ... torch_dtype=torch.float16,
122
+ ... quantization_config=quantization_config,
123
+ ... )
124
+ >>> model = prepare_model_for_kbit_training(model)
125
+ >>> lora_model = get_peft_model(model, config)
126
+ ```
127
+
128
+ **Attributes**:
129
+ - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
130
+ - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.
131
+ """
132
+
133
+ prefix: str = "lora_"
134
+
135
+ def __init__(self, model, config, adapter_name) -> None:
136
+ super().__init__(model, config, adapter_name)
137
+
138
+ def _check_new_adapter_config(self, config: LoraConfig) -> None:
139
+ """
140
+ A helper method to check the config when a new adapter is being added.
141
+
142
+ Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
143
+
144
+ """
145
+ # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check
146
+ # does not fully correspond to the error message.
147
+ if (len(self.peft_config) > 1) and (config.bias != "none"):
148
+ raise ValueError(
149
+ f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
150
+ "set bias to 'none' for all adapters."
151
+ )
152
+
153
+ @staticmethod
154
+ def _check_target_module_exists(lora_config, key):
155
+ return check_target_module_exists(lora_config, key)
156
+
157
+ def _prepare_model(self, peft_config: LoraConfig, model: nn.Module):
158
+ r"""
159
+ A private method to modify the model structure before adapter is applied.
160
+
161
+ Args:
162
+ peft_config (`PeftConfig`):
163
+ The prepared adapter config.
164
+ model (`nn.Module`):
165
+ The model that is going to be adapted.
166
+ """
167
+ if peft_config.layer_replication:
168
+ replicate_layers(model, peft_config.layer_replication)
169
+
170
+ def _create_and_replace(
171
+ self,
172
+ lora_config,
173
+ adapter_name,
174
+ target,
175
+ target_name,
176
+ parent,
177
+ current_key,
178
+ ):
179
+ if current_key is None:
180
+ raise ValueError("Current Key shouldn't be `None`")
181
+
182
+ # Regexp matching - Find key which matches current target_name in patterns provided
183
+ pattern_keys = list(chain(lora_config.rank_pattern.keys(), lora_config.alpha_pattern.keys()))
184
+ target_name_key = next(filter(lambda key: re.match(rf".*\.{key}$", current_key), pattern_keys), current_key)
185
+ r = lora_config.rank_pattern.get(target_name_key, lora_config.r)
186
+ alpha = lora_config.alpha_pattern.get(target_name_key, lora_config.lora_alpha)
187
+
188
+ kwargs = {
189
+ "r": r,
190
+ "lora_alpha": alpha,
191
+ "lora_dropout": lora_config.lora_dropout,
192
+ "fan_in_fan_out": lora_config.fan_in_fan_out,
193
+ "init_lora_weights": lora_config.init_lora_weights,
194
+ "use_rslora": lora_config.use_rslora,
195
+ "use_dora": lora_config.use_dora,
196
+ "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
197
+ "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
198
+ }
199
+
200
+ quant_methods = ["gptq", "aqlm", "awq"]
201
+ for quant_method in quant_methods:
202
+ quantization_config = get_quantization_config(self.model, method=quant_method)
203
+ if quantization_config is not None:
204
+ kwargs[f"{quant_method}_quantization_config"] = quantization_config
205
+
206
+ # note: AdaLoraLayer is a subclass of LoraLayer, we need to exclude it
207
+ from peft.tuners.adalora import AdaLoraLayer
208
+
209
+ if isinstance(target, LoraLayer) and not isinstance(target, AdaLoraLayer):
210
+ target.update_layer(
211
+ adapter_name,
212
+ r,
213
+ lora_alpha=alpha,
214
+ lora_dropout=lora_config.lora_dropout,
215
+ init_lora_weights=lora_config.init_lora_weights,
216
+ use_rslora=lora_config.use_rslora,
217
+ use_dora=lora_config.use_dora,
218
+ )
219
+ else:
220
+ new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
221
+ if adapter_name != self.active_adapter:
222
+ # adding an additional adapter: it is not automatically trainable
223
+ new_module.requires_grad_(False)
224
+ self._replace_module(parent, target_name, new_module, target)
225
+
226
+ def _replace_module(self, parent, child_name, new_module, child):
227
+ setattr(parent, child_name, new_module)
228
+ # It's not necessary to set requires_grad here, as that is handled by
229
+ # _mark_only_adapters_as_trainable
230
+
231
+ # child layer wraps the original module, unpack it
232
+ if hasattr(child, "base_layer"):
233
+ child = child.base_layer
234
+
235
+ if not hasattr(new_module, "base_layer"):
236
+ new_module.weight = child.weight
237
+ if hasattr(child, "bias"):
238
+ new_module.bias = child.bias
239
+
240
+ if getattr(child, "state", None) is not None:
241
+ if hasattr(new_module, "base_layer"):
242
+ new_module.base_layer.state = child.state
243
+ else:
244
+ new_module.state = child.state
245
+ new_module.to(child.weight.device)
246
+
247
+ # dispatch to correct device
248
+ for name, module in new_module.named_modules():
249
+ if (self.prefix in name) or ("ranknum" in name):
250
+ weight = child.qweight if hasattr(child, "qweight") else child.weight
251
+ module.to(weight.device)
252
+
253
+ def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
254
+ for n, p in model.named_parameters():
255
+ if self.prefix not in n:
256
+ p.requires_grad = False
257
+
258
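+ # Bias handling per adapter config: "none" trains no bias parameters, "all" makes every bias in the model
+ # trainable, and "lora_only" only enables the bias of modules that carry a LoRA layer.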
+ for active_adapter in self.active_adapters:
259
+ bias = self.peft_config[active_adapter].bias
260
+ if bias == "none":
261
+ continue
262
+
263
+ if bias == "all":
264
+ for n, p in model.named_parameters():
265
+ if "bias" in n:
266
+ p.requires_grad = True
267
+ elif bias == "lora_only":
268
+ for m in model.modules():
269
+ if isinstance(m, LoraLayer) and hasattr(m, "bias") and m.bias is not None:
270
+ m.bias.requires_grad = True
271
+ else:
272
+ raise NotImplementedError(f"Requested bias: {bias}, is not implemented.")
273
+
274
+ @staticmethod
275
+ def _create_new_module(lora_config, adapter_name, target, **kwargs):
276
+ # Collect dispatcher functions to decide what backend to use for the replaced LoRA layer. The order matters,
277
+ # because the first match is always used. Therefore, the default layers should be checked last.
278
+ dispatchers = []
279
+
280
+ # avoid eager bnb import
281
+ if is_bnb_available():
282
+ from .bnb import dispatch_bnb_8bit
283
+
284
+ dispatchers.append(dispatch_bnb_8bit)
285
+
286
+ if is_bnb_4bit_available():
287
+ from .bnb import dispatch_bnb_4bit
288
+
289
+ dispatchers.append(dispatch_bnb_4bit)
290
+
291
+ dispatchers.extend([dispatch_aqlm, dispatch_awq, dispatch_gptq, dispatch_megatron, dispatch_default])
292
+
293
+ new_module = None
294
+ for dispatcher in dispatchers:
295
+ new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs)
296
+ if new_module is not None: # first match wins
297
+ break
298
+
299
+ if new_module is None:
300
+ # no module could be matched
301
+ raise ValueError(
302
+ f"Target module {target} is not supported. Currently, only the following modules are supported: "
303
+ "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `transformers.pytorch_utils.Conv1D`."
304
+ )
305
+
306
+ return new_module
307
+
308
+ def __getattr__(self, name: str):
309
+ """Forward missing attributes to the wrapped module."""
310
+ try:
311
+ return super().__getattr__(name) # defer to nn.Module's logic
312
+ except AttributeError:
313
+ return getattr(self.model, name)
314
+
315
+ def get_peft_config_as_dict(self, inference: bool = False):
316
+ config_dict = {}
317
+ for key, value in self.peft_config.items():
318
+ config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
319
+ if inference:
320
+ config["inference_mode"] = True
321
+ config_dict[key] = config
322
+ return config_dict
323
+
324
+ def _set_adapter_layers(self, enabled: bool = True) -> None:
325
+ for module in self.model.modules():
326
+ if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
327
+ module.enable_adapters(enabled)
328
+
329
+ def enable_adapter_layers(self) -> None:
330
+ """Enable all adapters.
331
+
332
+ Call this if you have previously disabled all adapters and want to re-enable them.
333
+ """
334
+ self._set_adapter_layers(enabled=True)
335
+
336
+ def disable_adapter_layers(self) -> None:
337
+ """Disable all adapters.
338
+
339
+ When disabling all adapters, the model output corresponds to the output of the base model.
340
+ """
341
+ for active_adapter in self.active_adapters:
342
+ val = self.peft_config[active_adapter].bias
343
+ if val != "none":
344
+ msg = (
345
+ f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same "
346
+ "output as the the base model would without adaption."
347
+ )
348
+ warnings.warn(msg)
349
+ self._set_adapter_layers(enabled=False)
350
+
351
+ def set_adapter(self, adapter_name: str | list[str]) -> None:
352
+ """Set the active adapter(s).
353
+
354
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
355
+ not desired, use the following code.
356
+
357
+ ```py
358
+ >>> for name, param in model_peft.named_parameters():
359
+ ... if ...: # some check on name (ex. if 'lora' in name)
360
+ ... param.requires_grad = False
361
+ ```
362
+
363
+ Args:
364
+ adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
365
+ """
366
+ for module in self.model.modules():
367
+ if isinstance(module, LoraLayer):
368
+ if module.merged:
369
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
370
+ module.unmerge()
371
+ module.set_adapter(adapter_name)
372
+ self.active_adapter = adapter_name
373
+
374
+ @contextmanager
375
+ def _enable_peft_forward_hooks(self, *args, **kwargs):
376
+ # If adapter_names is passed as an argument, we inject it into the forward arguments.
377
+ adapter_names = kwargs.pop("adapter_names", None)
378
+ if adapter_names is None:
379
+ # nothing to do
380
+ yield
381
+ return
382
+
383
+ if self.training:
384
+ raise ValueError("Cannot pass `adapter_names` when the model is in training mode.")
385
+
386
+ hook_handles = []
387
+ for module in self.modules():
388
+ if isinstance(module, LoraLayer):
389
+ pre_forward = partial(_adapter_names_pre_forward_hook, adapter_names=adapter_names)
390
+ handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
391
+ hook_handles.append(handle)
392
+
393
+ yield
394
+
395
+ for handle in hook_handles:
396
+ handle.remove()
397
+
398
+ def _check_merge_allowed(self):
399
+ """Verify that the configuration supports merging.
400
+
401
+ Currently gptq quantization and replicated layers do not support merging.
402
+ """
403
+ if getattr(self.model, "quantization_method", None) == "gptq":
404
+ raise ValueError("Cannot merge LORA layers when the model is gptq quantized")
405
+ if self.peft_config.get("layer_replication"):
406
+ raise ValueError("Cannot merge LORA layers when base model layers are replicated")
407
+
408
+ @staticmethod
409
+ def _prepare_adapter_config(peft_config, model_config):
410
+ if peft_config.target_modules is None:
411
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
412
+ raise ValueError("Please specify `target_modules` in `peft_config`")
413
+ peft_config.target_modules = set(
414
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
415
+ )
416
+ return peft_config
417
+
418
+ def _unload_and_optionally_merge(
419
+ self,
420
+ merge=True,
421
+ progressbar: bool = False,
422
+ safe_merge: bool = False,
423
+ adapter_names: Optional[list[str]] = None,
424
+ ):
425
+ if merge:
426
+ self._check_merge_allowed()
427
+
428
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
429
+ desc = "Unloading " + ("and merging " if merge else "") + "model"
430
+ for key in tqdm(key_list, disable=not progressbar, desc=desc):
431
+ try:
432
+ parent, target, target_name = _get_submodules(self.model, key)
433
+ except AttributeError:
434
+ continue
435
+ with onload_layer(target):
436
+ if hasattr(target, "base_layer"):
437
+ if merge:
438
+ target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
439
+ self._replace_module(parent, target_name, target.get_base_layer(), target)
440
+ elif isinstance(target, ModulesToSaveWrapper):
441
+ # save any additional trainable modules part of `modules_to_save`
442
+ new_module = target.modules_to_save[target.active_adapter]
443
+ if hasattr(new_module, "base_layer"):
444
+ # check if the module is itself a tuner layer
445
+ if merge:
446
+ new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
447
+ new_module = new_module.get_base_layer()
448
+ setattr(parent, target_name, new_module)
449
+
450
+ return self.model
451
+
452
+ def add_weighted_adapter(
453
+ self,
454
+ adapters,
455
+ weights,
456
+ adapter_name,
457
+ combination_type="svd",
458
+ svd_rank=None,
459
+ svd_clamp=None,
460
+ svd_full_matrices=True,
461
+ svd_driver=None,
462
+ density=None,
463
+ majority_sign_method: Literal["total", "frequency"] = "total",
464
+ ) -> None:
465
+ """
466
+ This method adds a new adapter by merging the given adapters with the given weights.
467
+
468
+ When using the `cat` combination_type you should be aware that rank of the resulting adapter will be equal to
469
+ the sum of all adapters ranks. So it's possible that the mixed adapter may become too big and result in OOM
470
+ errors.
471
+
472
+ Args:
473
+ adapters (`list`):
474
+ List of adapter names to be merged.
475
+ weights (`list`):
476
+ List of weights for each adapter.
477
+ adapter_name (`str`):
478
+ Name of the new adapter.
479
+ combination_type (`str`):
480
+ The merging type can be one of [`svd`, `linear`, `cat`, `ties`, `ties_svd`, `dare_ties`, `dare_linear`,
481
+ `dare_ties_svd`, `dare_linear_svd`, `magnitude_prune`, `magnitude_prune_svd`]. When using the `cat`
482
+ combination_type, the rank of the resulting adapter is equal to the sum of all adapters ranks (the
483
+ mixed adapter may be too big and result in OOM errors).
484
+ svd_rank (`int`, *optional*):
485
+ Rank of output adapter for svd. If None provided, will use max rank of merging adapters.
486
+ svd_clamp (`float`, *optional*):
487
+ A quantile threshold for clamping SVD decomposition output. If None is provided, do not perform
488
+ clamping. Defaults to None.
489
+ svd_full_matrices (`bool`, *optional*):
490
+ Controls whether to compute the full or reduced SVD, and consequently, the shape of the returned
491
+ tensors U and Vh. Defaults to True.
492
+ svd_driver (`str`, *optional*):
493
+ Name of the cuSOLVER method to be used. This keyword argument only works when merging on CUDA. Can be
494
+ one of [None, `gesvd`, `gesvdj`, `gesvda`]. For more info please refer to `torch.linalg.svd`
495
+ documentation. Defaults to None.
496
+ density (`float`, *optional*):
497
+ Value between 0 and 1. 0 means all values are pruned and 1 means no values are pruned. Should be used
498
+ with [`ties`, `ties_svd`, `dare_ties`, `dare_linear`, `dare_ties_svd`, `dare_linear_svd`,
499
+ `magnitude_prune`, `magnitude_prune_svd`]
500
+ majority_sign_method (`str`):
501
+ The method, should be one of ["total", "frequency"], to use to get the magnitude of the sign values.
502
+ Should be used with [`ties`, `ties_svd`, `dare_ties`, `dare_ties_svd`]
503
+ """
504
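+ # Usage sketch (hypothetical adapter names, consistent with the signature above):
+ # model.add_weighted_adapter(["a1", "a2"], weights=[0.7, 0.3], adapter_name="merged",
+ # combination_type="ties", density=0.5)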
+
505
+ if adapter_name in list(self.peft_config.keys()):
506
+ return
507
+ for adapter in adapters:
508
+ if adapter not in list(self.peft_config.keys()):
509
+ raise ValueError(f"Adapter {adapter} does not exist")
510
+
511
+ # if there is only one adapter, we can only use linear merging
512
+ combination_type = "linear" if len(adapters) == 1 else combination_type
513
+
514
+ adapters_ranks = [self.peft_config[adapter].r for adapter in adapters]
515
+ if combination_type in ("linear", "ties", "dare_ties", "dare_linear", "magnitude_prune"):
516
+ # all adapter ranks should be the same; the new rank is just this value
517
+ if len(set(adapters_ranks)) != 1:
518
+ raise ValueError(
519
+ "All adapters must have the same r value when using combination_type linear, ties, dare_ties or dare_linear."
520
+ )
521
+ new_rank = adapters_ranks[0]
522
+ elif combination_type == "cat":
523
+ # adapter ranks may differ; the new rank is the sum of all ranks
524
+ # be careful: the output adapter rank may become very large when mixing many adapters
525
+ new_rank = sum(adapters_ranks)
526
+ elif combination_type.endswith("svd"):
527
+ # new rank is the max of all ranks of the adapters if not provided
528
+ new_rank = svd_rank or max(adapters_ranks)
529
+ else:
530
+ raise ValueError(f"Invalid combination_type: {combination_type}")
531
+
532
+ target_module_types = [type(self.peft_config[adapter].target_modules) for adapter in adapters]
533
+ if not target_module_types:
534
+ raise ValueError(f"Found no adapter matching the names in {adapters}")
535
+ if len(set(target_module_types)) > 1:
536
+ raise ValueError(
537
+ "all adapter configs should follow the same target modules type. "
538
+ "Combining adapters with `target_modules` type being a mix of list/set and string is not supported."
539
+ )
540
+
541
+ if target_module_types[0] == str:
542
+ new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters)
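+ # string target_modules are treated as a regex by PEFT's module matching, so a regex union of the
+ # individual patterns covers the targets of every merged adapter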
543
+ elif target_module_types[0] == set:
544
+ new_target_modules = reduce(
545
+ operator.or_, (self.peft_config[adapter].target_modules for adapter in adapters)
546
+ )
547
+ else:
548
+ raise TypeError(f"Invalid type {target_module_types[0]} found in target_modules")
549
+
550
+ self.peft_config[adapter_name] = replace(
551
+ self.peft_config[adapters[0]],
552
+ r=new_rank,
553
+ lora_alpha=new_rank,
554
+ target_modules=new_target_modules,
555
+ )
556
+ self.inject_adapter(self.model, adapter_name)
557
+
558
+ # Do we really need that?
559
+ _freeze_adapter(self.model, adapter_name)
560
+
561
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
562
+ for key in key_list:
563
+ _, target, _ = _get_submodules(self.model, key)
564
+ if isinstance(target, LoraLayer):
565
+ if adapter_name in target.lora_A:
566
+ target_lora_A = target.lora_A[adapter_name].weight
567
+ target_lora_B = target.lora_B[adapter_name].weight
568
+ elif adapter_name in target.lora_embedding_A:
569
+ target_lora_A = target.lora_embedding_A[adapter_name]
570
+ target_lora_B = target.lora_embedding_B[adapter_name]
571
+ else:
572
+ continue
573
+
574
+ target_lora_A.data = target_lora_A.data * 0.0
575
+ target_lora_B.data = target_lora_B.data * 0.0
576
+ if combination_type == "cat":
577
+ loras_A, loras_B = [], []
578
+ for adapter, weight in zip(adapters, weights):
579
+ if adapter in target.lora_A:
580
+ current_adapter_lora_A = target.lora_A[adapter].weight
581
+ current_adapter_lora_B = target.lora_B[adapter].weight
582
+ elif adapter in target.lora_embedding_A:
583
+ current_adapter_lora_A = target.lora_embedding_A[adapter]
584
+ current_adapter_lora_B = target.lora_embedding_B[adapter]
585
+ else:
586
+ continue
587
+ loras_A.append(current_adapter_lora_A.data * weight * target.scaling[adapter])
588
+ loras_B.append(current_adapter_lora_B.data)
589
+
590
+ if len(loras_A) == 0:
591
+ raise ValueError("No matching LoRAs found. Please raise an issue on GitHub.")
592
+ loras_A = torch.cat(loras_A, dim=0)
593
+ loras_B = torch.cat(loras_B, dim=1)
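+ # concatenating the A matrices along dim 0 and the B matrices along dim 1 means that
+ # B_cat @ A_cat equals the sum of the individual weighted B_i @ A_i deltas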
594
+ target_lora_A.data[: loras_A.shape[0], :] = loras_A
595
+ target_lora_B.data[:, : loras_B.shape[1]] = loras_B
596
+ elif combination_type in [
597
+ "svd",
598
+ "ties_svd",
599
+ "dare_linear_svd",
600
+ "dare_ties_svd",
601
+ "magnitude_prune_svd",
602
+ ]:
603
+ target_lora_A.data, target_lora_B.data = self._svd_generalized_task_arithmetic_weighted_adapter(
604
+ combination_type,
605
+ adapters,
606
+ weights,
607
+ new_rank,
608
+ target,
609
+ target_lora_A,
610
+ target_lora_B,
611
+ density,
612
+ majority_sign_method,
613
+ svd_clamp,
614
+ full_matrices=svd_full_matrices,
615
+ driver=svd_driver,
616
+ )
617
+ elif combination_type in ["linear", "ties", "dare_linear", "dare_ties", "magnitude_prune"]:
618
+ target_lora_A.data, target_lora_B.data = self._generalized_task_arithmetic_weighted_adapter(
619
+ combination_type, adapters, weights, target, density, majority_sign_method
620
+ )
621
+
622
+ def _svd_generalized_task_arithmetic_weighted_adapter(
623
+ self,
624
+ combination_type,
625
+ adapters,
626
+ weights,
627
+ new_rank,
628
+ target,
629
+ target_lora_A,
630
+ target_lora_B,
631
+ density,
632
+ majority_sign_method,
633
+ clamp=None,
634
+ full_matrices=True,
635
+ driver=None,
636
+ ):
637
+ valid_adapters = []
638
+ valid_weights = []
639
+ is_embedding = any(adapter in target.lora_embedding_A for adapter in adapters)
640
+ for adapter, weight in zip(adapters, weights):
641
+ if adapter in target.lora_A or adapter in target.lora_embedding_A:
642
+ valid_adapters.append(adapter)
643
+ valid_weights.append(weight * target.scaling[adapter])
644
+
645
+ # if there is no valid adapter, something went wrong, so raise an error
646
+ if len(valid_adapters) == 0:
647
+ raise ValueError("No matching LoRAs found. Please raise an issue on GitHub.")
648
+ delta_weight = [target.get_delta_weight(adapter) for adapter in valid_adapters]
649
+ valid_weights = torch.tensor(valid_weights).to(delta_weight[0].device)
650
+ if combination_type == "svd":
651
+ delta_weight = task_arithmetic(delta_weight, valid_weights)
652
+ elif combination_type == "ties_svd":
653
+ delta_weight = ties(delta_weight, valid_weights, density, majority_sign_method)
654
+ elif combination_type == "dare_linear_svd":
655
+ delta_weight = dare_linear(delta_weight, valid_weights, density)
656
+ elif combination_type == "dare_ties_svd":
657
+ delta_weight = dare_ties(delta_weight, valid_weights, density, majority_sign_method)
658
+ elif combination_type == "magnitude_prune_svd":
659
+ delta_weight = magnitude_prune(delta_weight, valid_weights, density)
660
+ else:
661
+ raise ValueError(f"Invalid value passed to combination type: {combination_type}")
662
+
663
+ conv2d = isinstance(target, Conv2d)
664
+ if conv2d:
665
+ conv2d_1x1 = target.weight.size()[2:4] == (1, 1)
666
+ if not conv2d_1x1:
667
+ delta_weight = delta_weight.flatten(start_dim=1)
668
+ else:
669
+ delta_weight = delta_weight.squeeze()
670
+ if (hasattr(target, "fan_in_fan_out") and target.fan_in_fan_out) or is_embedding:
671
+ delta_weight = delta_weight.T
672
+
673
+ # based on https://github.com/kohya-ss/sd-scripts/blob/main/networks/svd_merge_lora.py#L114-L131
674
+ U, S, Vh = torch.linalg.svd(delta_weight, full_matrices=full_matrices, driver=driver)
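+ # keep only the top `new_rank` singular components and fold the singular values into U, so that the
+ # returned pair (Vh, U) approximates the merged delta weight as lora_B @ lora_A at the reduced rank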
675
+ U = U[:, :new_rank]
676
+ S = S[:new_rank]
677
+ U = U @ torch.diag(S)
678
+ Vh = Vh[:new_rank, :]
679
+ if clamp is not None:
680
+ dist = torch.cat([U.flatten(), Vh.flatten()])
681
+ hi_val = torch.quantile(dist, clamp)
682
+ low_val = -hi_val
683
+ U = U.clamp(low_val, hi_val)
684
+ Vh = Vh.clamp(low_val, hi_val)
685
+ if conv2d:
686
+ U = U.reshape(target_lora_B.data.shape)
687
+ Vh = Vh.reshape(target_lora_A.data.shape)
688
+ return Vh, U
689
+
690
+ def _generalized_task_arithmetic_weighted_adapter(
691
+ self,
692
+ combination_type,
693
+ adapters,
694
+ weights,
695
+ target,
696
+ density,
697
+ majority_sign_method,
698
+ ):
699
+ # account for the adapter weights on both the LoRA A and B layers.
700
+ valid_weights = []
701
+ lora_A_deltas = []
702
+ lora_B_deltas = []
703
+ for adapter, weight in zip(adapters, weights):
704
+ if adapter in target.lora_A:
705
+ current_adapter_lora_A = target.lora_A[adapter].weight
706
+ current_adapter_lora_B = target.lora_B[adapter].weight
707
+ elif adapter in target.lora_embedding_A:
708
+ current_adapter_lora_A = target.lora_embedding_A[adapter]
709
+ current_adapter_lora_B = target.lora_embedding_B[adapter]
710
+ else:
711
+ continue
712
+ valid_weights.append(math.sqrt(weight * target.scaling[adapter]))
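+ # the square root is taken because the weight is applied to both lora_A and lora_B, so their
+ # product B @ A ends up scaled by the full weight * scaling factor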
713
+ lora_A_deltas.append(current_adapter_lora_A.data)
714
+ lora_B_deltas.append(current_adapter_lora_B.data)
715
+ valid_weights = torch.tensor(valid_weights).to(lora_A_deltas[0].device)
716
+ lora_deltas = [lora_A_deltas, lora_B_deltas]
717
+ dtype = lora_A_deltas[0].dtype
718
+ for i, task_tensors in enumerate(lora_deltas):
719
+ if combination_type == "linear":
720
+ lora_deltas[i] = task_arithmetic(task_tensors, valid_weights)
721
+ elif combination_type == "ties":
722
+ lora_deltas[i] = ties(task_tensors, valid_weights, density, majority_sign_method)
723
+ elif combination_type == "dare_linear":
724
+ lora_deltas[i] = dare_linear(task_tensors, valid_weights, density)
725
+ elif combination_type == "dare_ties":
726
+ lora_deltas[i] = dare_ties(task_tensors, valid_weights, density, majority_sign_method)
727
+ elif combination_type == "magnitude_prune":
728
+ lora_deltas[i] = magnitude_prune(task_tensors, valid_weights, density)
729
+ else:
730
+ raise ValueError("Invalid combination type")
731
+ lora_deltas = [delta.to(dtype) for delta in lora_deltas]
732
+ return lora_deltas
733
+
734
+ def delete_adapter(self, adapter_name: str) -> None:
735
+ """
736
+ Deletes an existing adapter.
737
+
738
+ Args:
739
+ adapter_name (str): Name of the adapter to be deleted.
740
+ """
741
+ if adapter_name not in list(self.peft_config.keys()):
742
+ raise ValueError(f"Adapter {adapter_name} does not exist")
743
+ del self.peft_config[adapter_name]
744
+
745
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
746
+ new_adapter = None
747
+ for key in key_list:
748
+ _, target, _ = _get_submodules(self.model, key)
749
+ if isinstance(target, LoraLayer):
750
+ target.delete_adapter(adapter_name)
751
+ if new_adapter is None:
752
+ new_adapter = target.active_adapters[:]
753
+
754
+ self.active_adapter = new_adapter or []
755
+
756
+ def merge_and_unload(
757
+ self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
758
+ ) -> torch.nn.Module:
759
+ r"""
760
+ This method merges the LoRA layers into the base model. This is needed if someone wants to use the base model
761
+ as a standalone model.
762
+
763
+ Args:
764
+ progressbar (`bool`):
765
+ whether to show a progressbar indicating the unload and merge process
766
+ safe_merge (`bool`):
767
+ whether to activate the safe merging check, which verifies that there are no potential NaN values in the
768
+ adapter weights
769
+ adapter_names (`List[str]`, *optional*):
770
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
771
+ to `None`.
772
+ Example:
773
+
774
+ ```py
775
+ >>> from transformers import AutoModelForCausalLM
776
+ >>> from peft import PeftModel
777
+
778
+ >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
779
+ >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
780
+ >>> model = PeftModel.from_pretrained(base_model, peft_model_id)
781
+ >>> merged_model = model.merge_and_unload()
782
+ ```
783
+ """
784
+ return self._unload_and_optionally_merge(
785
+ progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
786
+ )
787
+
788
+ def unload(self) -> torch.nn.Module:
789
+ """
790
+ Gets back the base model by removing all the LoRA modules without merging. This gives back the original base
791
+ model.
792
+ """
793
+ return self._unload_and_optionally_merge(merge=False)
env-llmeval/lib/python3.10/site-packages/peft/tuners/lora/tp_layer.py ADDED
@@ -0,0 +1,230 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import warnings
17
+ from typing import Any, Optional
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.init as init
22
+
23
+ from peft.tuners.tuners_utils import BaseTunerLayer
24
+
25
+ from .layer import LoraLayer
26
+
27
+
28
+ class LoraParallelLinear(nn.Module, LoraLayer):
29
+ """
30
+ When the target layer is a RowParallelLinear, the LoRA matrix A is split along its rows so that the input
31
+ and output shapes stay consistent, while lora_B remains a complete (non-parallel) linear layer. Conversely,
32
+ when the target layer is a ColumnParallelLinear, lora_B is split along its columns, while lora_A remains a
33
+ complete linear layer.
34
+ """
35
+
36
+ def __init__(
37
+ self,
38
+ base_layer,
39
+ adapter_name: str,
40
+ backend,
41
+ r: int = 0,
42
+ lora_alpha: int = 1,
43
+ lora_dropout: float = 0.0,
44
+ fan_in_fan_out: bool = False,
45
+ init_lora_weights: bool = True,
46
+ use_rslora: bool = False,
47
+ use_dora: bool = False,
48
+ **kwargs,
49
+ ):
50
+ super().__init__()
51
+ LoraLayer.__init__(self, base_layer=base_layer)
52
+
53
+ if use_dora:
54
+ raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
55
+
56
+ self.backend = backend
57
+ self.is_parallel_a = isinstance(base_layer, backend.RowParallelLinear)
58
+ self.fan_in_fan_out = fan_in_fan_out
59
+ self._active_adapter = adapter_name
60
+
61
+ megatron_config = kwargs["megatron_config"]
62
+ parallel_linear_kwargs = {"megatron_config": megatron_config}
63
+ init_method = init.xavier_normal_
64
+ if hasattr(megatron_config, "init_method"):
65
+ init_method = megatron_config.init_method
66
+ input_is_parallel = True
67
+ gather_output = False
68
+ if isinstance(base_layer, self.backend.RowParallelLinear):
69
+ input_is_parallel = base_layer.input_is_parallel
70
+ else:
71
+ gather_output = base_layer.gather_output
72
+ self.update_layer(
73
+ adapter_name,
74
+ r,
75
+ lora_alpha=lora_alpha,
76
+ lora_dropout=lora_dropout,
77
+ init_lora_weights=init_lora_weights,
78
+ use_rslora=use_rslora,
79
+ use_dora=use_dora,
80
+ init_method=init_method,
81
+ input_is_parallel=input_is_parallel,
82
+ gather_output=gather_output,
83
+ **parallel_linear_kwargs,
84
+ )
85
+
86
+ self.is_target_conv_1d_layer = False
87
+
88
+ def update_layer(
89
+ self,
90
+ adapter_name,
91
+ r,
92
+ lora_alpha,
93
+ lora_dropout,
94
+ init_lora_weights,
95
+ use_rslora,
96
+ use_dora=False,
97
+ init_method=init.xavier_normal_,
98
+ input_is_parallel=True,
99
+ gather_output=False,
100
+ **parallel_linear_kwargs,
101
+ ):
102
+ if r <= 0:
103
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
104
+ self.r[adapter_name] = r
105
+ self.lora_alpha[adapter_name] = lora_alpha
106
+ if lora_dropout > 0.0:
107
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
108
+ else:
109
+ lora_dropout_layer = nn.Identity()
110
+
111
+ self.lora_dropout[adapter_name] = lora_dropout_layer
112
+
113
+ megatron_config = parallel_linear_kwargs["megatron_config"]
114
+ # LoRA weights need to be kept in 32-bit precision, otherwise they may overflow
115
+ megatron_config.params_dtype = torch.float32
116
+ if self.is_parallel_a:
117
+ lora_a = self.backend.RowParallelLinear(
118
+ input_size=self.in_features,
119
+ output_size=r,
120
+ bias=False,
121
+ input_is_parallel=input_is_parallel,
122
+ skip_bias_add=True,
123
+ init_method=init_method,
124
+ config=megatron_config,
125
+ )
126
+ lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32)
127
+ else:
128
+ lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32)
129
+ lora_b = self.backend.ColumnParallelLinear(
130
+ input_size=r,
131
+ output_size=self.out_features,
132
+ bias=False,
133
+ gather_output=gather_output,
134
+ init_method=init_method,
135
+ config=megatron_config,
136
+ )
137
+ self.lora_A[adapter_name] = lora_a
138
+ self.lora_B[adapter_name] = lora_b
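+ # rsLoRA scales by lora_alpha / sqrt(r) instead of lora_alpha / r, which keeps the magnitude of the
+ # LoRA update more stable as the rank grows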
139
+ if use_rslora:
140
+ self.scaling[adapter_name] = lora_alpha / (r**0.5)
141
+ else:
142
+ self.scaling[adapter_name] = lora_alpha / r
143
+ if init_lora_weights:
144
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
145
+
146
+ weight = getattr(self.get_base_layer(), "weight", None)
147
+ if weight is not None:
148
+ # the layer is already completely initialized, this is an update
149
+ if weight.dtype.is_floating_point or weight.dtype.is_complex:
150
+ self.to(weight.device, dtype=weight.dtype)
151
+ else:
152
+ self.to(weight.device)
153
+ self.set_adapter(self.active_adapters)
154
+
155
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any):
156
+ previous_dtype = x.dtype
157
+ # Using the weight directly for the matrix multiplication here would skip the final aggregation performed
158
+ # by the original parallel_linear layer, so we call its forward function instead to obtain the output of
159
+ # the original parallel_linear layer.
160
+ if self.disable_adapters:
161
+ if self.merged:
162
+ self.unmerge()
163
+ result, bias = self.base_layer(x, *args, **kwargs)
164
+ elif self.merged:
165
+ result, bias = self.base_layer(x, *args, **kwargs)
166
+ else:
167
+ result, bias = self.base_layer(x, *args, **kwargs)
168
+ for active_adapter in self.active_adapters:
169
+ if active_adapter not in self.lora_A.keys():
170
+ continue
171
+ lora_A = self.lora_A[active_adapter]
172
+ lora_B = self.lora_B[active_adapter]
173
+ dropout = self.lora_dropout[active_adapter]
174
+ scaling = self.scaling[active_adapter]
175
+ x = x.to(lora_A.weight.dtype)
176
+
177
+ lora_result = lora_A(dropout(x))
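+ # megatron parallel linear layers return an (output, bias) tuple, so keep only the output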
178
+ if isinstance(lora_result, tuple):
179
+ lora_result = lora_result[0]
180
+ lora_result = lora_B(lora_result)
181
+ if isinstance(lora_result, tuple):
182
+ lora_result = lora_result[0]
183
+ lora_result = lora_result * scaling
184
+
185
+ result = result + lora_result
186
+
187
+ result = result.to(previous_dtype)
188
+ return result, bias
189
+
190
+
191
+ def dispatch_megatron(
192
+ target: torch.nn.Module,
193
+ adapter_name: str,
194
+ lora_config,
195
+ **kwargs: Any,
196
+ ) -> Optional[torch.nn.Module]:
197
+ new_module = None
198
+
199
+ if isinstance(target, BaseTunerLayer):
200
+ target_base_layer = target.get_base_layer()
201
+ else:
202
+ target_base_layer = target
203
+
204
+ if lora_config.megatron_config:
205
+ megatron_core = importlib.import_module(lora_config.megatron_core)
206
+ else:
207
+ megatron_core = None
208
+
209
+ if megatron_core and isinstance(
210
+ target_base_layer,
211
+ (megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear),
212
+ ):
213
+ megatron_kwargs = kwargs.copy()
214
+ megatron_config = lora_config.megatron_config
215
+ if isinstance(megatron_config, dict):
216
+ transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig
217
+ megatron_config = transformer_config_class(**lora_config.megatron_config)
218
+ megatron_kwargs["megatron_config"] = megatron_config
219
+ if megatron_kwargs["fan_in_fan_out"]:
220
+ warnings.warn(
221
+ "fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` "
222
+ "or `RowParallelLinear`. "
223
+ "Setting fan_in_fan_out to False."
224
+ )
225
+ megatron_kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
226
+ new_module = LoraParallelLinear(
227
+ base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs
228
+ )
229
+
230
+ return new_module
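+
+
+ # Illustrative usage sketch (comments only, not executed): with Megatron-LM available, setting
+ # `megatron_config`/`megatron_core` on a LoraConfig lets `dispatch_megatron` wrap
+ # ColumnParallelLinear/RowParallelLinear targets with LoraParallelLinear. The names
+ # `megatron_model`, `transformer_config` and the target module name below are placeholders.
+ #
+ # from peft import LoraConfig, get_peft_model
+ #
+ # lora_config = LoraConfig(
+ #     r=8,
+ #     lora_alpha=16,
+ #     target_modules=["query_key_value"],  # placeholder: name of a parallel linear layer
+ #     megatron_config=transformer_config,  # a megatron TransformerConfig (or its dict form)
+ #     megatron_core="megatron.core",
+ # )
+ # peft_model = get_peft_model(megatron_model, lora_config)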