applied-ai-018 commited on
Commit
2f0dc34
·
verified ·
1 Parent(s): 5fef0d4

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__init__.py +13 -0
  2. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__version__.py +6 -0
  8. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py +264 -0
  9. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_func.py +56 -0
  10. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py +291 -0
  11. env-llmeval/lib/python3.10/site-packages/mbstrdecoder/py.typed +0 -0
  12. env-llmeval/lib/python3.10/site-packages/peft/__init__.py +90 -0
  13. env-llmeval/lib/python3.10/site-packages/peft/auto.py +170 -0
  14. env-llmeval/lib/python3.10/site-packages/peft/config.py +270 -0
  15. env-llmeval/lib/python3.10/site-packages/peft/helpers.py +113 -0
  16. env-llmeval/lib/python3.10/site-packages/peft/import_utils.py +73 -0
  17. env-llmeval/lib/python3.10/site-packages/peft/mapping.py +168 -0
  18. env-llmeval/lib/python3.10/site-packages/peft/mixed_model.py +409 -0
  19. env-llmeval/lib/python3.10/site-packages/peft/peft_model.py +1986 -0
  20. env-llmeval/lib/python3.10/site-packages/peft/py.typed +0 -0
  21. env-llmeval/lib/python3.10/site-packages/peft/tuners/__init__.py +32 -0
  22. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py +37 -0
  23. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py +145 -0
  30. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/config.py +52 -0
  31. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py +72 -0
  32. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/layer.py +347 -0
  33. env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/model.py +346 -0
  34. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py +36 -0
  35. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py +129 -0
  36. env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/layer.py +307 -0
  37. env-llmeval/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py +428 -0
  38. env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py +19 -0
  39. env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py +61 -0
  43. env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py +115 -0
  44. env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__init__.py +20 -0
  47. env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ from .__version__ import __author__, __copyright__, __email__, __license__, __version__
6
+ from ._func import detect_file_encoding
7
+ from ._mbstrdecoder import MultiByteStrDecoder
8
+
9
+
10
+ __all__ = (
11
+ "detect_file_encoding",
12
+ "MultiByteStrDecoder",
13
+ )
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (514 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc ADDED
Binary file (383 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc ADDED
Binary file (1.85 kB). View file
 
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc ADDED
Binary file (6.03 kB). View file
 
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__version__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ __author__ = "Tsuyoshi Hombashi"
2
+ __copyright__ = f"Copyright 2016, {__author__}"
3
+ __license__ = "MIT License"
4
+ __version__ = "1.1.3"
5
+ __maintainer__ = __author__
6
+ __email__ = "[email protected]"
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ import os.path
6
+
7
+
8
+ # list from https://github.com/sindresorhus/binary-extensions
9
+ binary_exts = (
10
+ "3dm",
11
+ "3ds",
12
+ "3g2",
13
+ "3gp",
14
+ "7z",
15
+ "a",
16
+ "aac",
17
+ "adp",
18
+ "ai",
19
+ "aif",
20
+ "aiff",
21
+ "alz",
22
+ "ape",
23
+ "apk",
24
+ "ar",
25
+ "arj",
26
+ "asf",
27
+ "au",
28
+ "avi",
29
+ "bak",
30
+ "baml",
31
+ "bh",
32
+ "bin",
33
+ "bk",
34
+ "bmp",
35
+ "btif",
36
+ "bz2",
37
+ "bzip2",
38
+ "cab",
39
+ "caf",
40
+ "cgm",
41
+ "class",
42
+ "cmx",
43
+ "cpio",
44
+ "cr2",
45
+ "cur",
46
+ "dat",
47
+ "dcm",
48
+ "deb",
49
+ "dex",
50
+ "djvu",
51
+ "dll",
52
+ "dmg",
53
+ "dng",
54
+ "doc",
55
+ "docm",
56
+ "docx",
57
+ "dot",
58
+ "dotm",
59
+ "dra",
60
+ "DS_Store",
61
+ "dsk",
62
+ "dts",
63
+ "dtshd",
64
+ "dvb",
65
+ "dwg",
66
+ "dxf",
67
+ "ecelp4800",
68
+ "ecelp7470",
69
+ "ecelp9600",
70
+ "egg",
71
+ "eol",
72
+ "eot",
73
+ "epub",
74
+ "exe",
75
+ "f4v",
76
+ "fbs",
77
+ "fh",
78
+ "fla",
79
+ "flac",
80
+ "fli",
81
+ "flv",
82
+ "fpx",
83
+ "fst",
84
+ "fvt",
85
+ "g3",
86
+ "gh",
87
+ "gif",
88
+ "graffle",
89
+ "gz",
90
+ "gzip",
91
+ "h261",
92
+ "h263",
93
+ "h264",
94
+ "icns",
95
+ "ico",
96
+ "ief",
97
+ "img",
98
+ "ipa",
99
+ "iso",
100
+ "jar",
101
+ "jpeg",
102
+ "jpg",
103
+ "jpgv",
104
+ "jpm",
105
+ "jxr",
106
+ "key",
107
+ "ktx",
108
+ "lha",
109
+ "lib",
110
+ "lvp",
111
+ "lz",
112
+ "lzh",
113
+ "lzma",
114
+ "lzo",
115
+ "m3u",
116
+ "m4a",
117
+ "m4v",
118
+ "mar",
119
+ "mdi",
120
+ "mht",
121
+ "mid",
122
+ "midi",
123
+ "mj2",
124
+ "mka",
125
+ "mkv",
126
+ "mmr",
127
+ "mng",
128
+ "mobi",
129
+ "mov",
130
+ "movie",
131
+ "mp3",
132
+ "mp4",
133
+ "mp4a",
134
+ "mpeg",
135
+ "mpg",
136
+ "mpga",
137
+ "mxu",
138
+ "nef",
139
+ "npx",
140
+ "numbers",
141
+ "nupkg",
142
+ "o",
143
+ "oga",
144
+ "ogg",
145
+ "ogv",
146
+ "otf",
147
+ "pages",
148
+ "pbm",
149
+ "pcx",
150
+ "pdb",
151
+ "pdf",
152
+ "pea",
153
+ "pgm",
154
+ "pic",
155
+ "png",
156
+ "pnm",
157
+ "pot",
158
+ "potm",
159
+ "potx",
160
+ "ppa",
161
+ "ppam",
162
+ "ppm",
163
+ "pps",
164
+ "ppsm",
165
+ "ppsx",
166
+ "ppt",
167
+ "pptm",
168
+ "pptx",
169
+ "psd",
170
+ "pya",
171
+ "pyc",
172
+ "pyo",
173
+ "pyv",
174
+ "qt",
175
+ "rar",
176
+ "ras",
177
+ "raw",
178
+ "resources",
179
+ "rgb",
180
+ "rip",
181
+ "rlc",
182
+ "rmf",
183
+ "rmvb",
184
+ "rtf",
185
+ "rz",
186
+ "s3m",
187
+ "s7z",
188
+ "scpt",
189
+ "sgi",
190
+ "shar",
191
+ "sil",
192
+ "sketch",
193
+ "slk",
194
+ "smv",
195
+ "snk",
196
+ "so",
197
+ "stl",
198
+ "suo",
199
+ "sub",
200
+ "swf",
201
+ "tar",
202
+ "tbz",
203
+ "tbz2",
204
+ "tga",
205
+ "tgz",
206
+ "thmx",
207
+ "tif",
208
+ "tiff",
209
+ "tlz",
210
+ "ttc",
211
+ "ttf",
212
+ "txz",
213
+ "udf",
214
+ "uvh",
215
+ "uvi",
216
+ "uvm",
217
+ "uvp",
218
+ "uvs",
219
+ "uvu",
220
+ "viv",
221
+ "vob",
222
+ "war",
223
+ "wav",
224
+ "wax",
225
+ "wbmp",
226
+ "wdp",
227
+ "weba",
228
+ "webm",
229
+ "webp",
230
+ "whl",
231
+ "wim",
232
+ "wm",
233
+ "wma",
234
+ "wmv",
235
+ "wmx",
236
+ "woff",
237
+ "woff2",
238
+ "wrm",
239
+ "wvx",
240
+ "xbm",
241
+ "xif",
242
+ "xla",
243
+ "xlam",
244
+ "xls",
245
+ "xlsb",
246
+ "xlsm",
247
+ "xlsx",
248
+ "xlt",
249
+ "xltm",
250
+ "xltx",
251
+ "xm",
252
+ "xmind",
253
+ "xpi",
254
+ "xpm",
255
+ "xwd",
256
+ "xz",
257
+ "z",
258
+ "zip",
259
+ "zipx",
260
+ )
261
+
262
+
263
+ def is_binary_ext_path(filepath) -> bool:
264
+ return os.path.splitext(filepath)[1].lstrip(".") in binary_exts
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_func.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ import os
6
+ import stat
7
+ from errno import EBADF, ENOENT, ENOTDIR
8
+ from typing import Optional, Union
9
+
10
+ from ._binary_ext_checker import is_binary_ext_path
11
+
12
+
13
+ def is_fifo(file_path: Union[int, bytes, str]) -> bool:
14
+ try:
15
+ return stat.S_ISFIFO(os.stat(file_path).st_mode)
16
+ except OSError as e:
17
+ if e.errno not in (ENOENT, ENOTDIR, EBADF):
18
+ raise
19
+
20
+ return False
21
+ except ValueError:
22
+ return False
23
+
24
+
25
+ def to_codec_name(name: Optional[str]) -> Optional[str]:
26
+ if not name:
27
+ return None
28
+
29
+ return name.lower().replace("-", "_")
30
+
31
+
32
+ def detect_file_encoding(file_path) -> Optional[str]:
33
+ from chardet.universaldetector import UniversalDetector
34
+
35
+ if not os.path.isfile(file_path) or is_binary_ext_path(file_path) or is_fifo(file_path):
36
+ return None
37
+
38
+ detector = UniversalDetector()
39
+ READ_SIZE = 4 * 1024
40
+
41
+ try:
42
+ with open(file_path, mode="rb") as f:
43
+ while True:
44
+ binary = f.read(READ_SIZE)
45
+ if not binary:
46
+ break
47
+
48
+ detector.feed(binary)
49
+ if detector.done:
50
+ break
51
+ except OSError:
52
+ return None
53
+ finally:
54
+ detector.close()
55
+
56
+ return to_codec_name(detector.result.get("encoding"))
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ import copy
6
+ import re
7
+ from typing import List, Optional, Sequence
8
+
9
+ from ._func import to_codec_name
10
+
11
+
12
+ def b(s: str) -> bytes:
13
+ return s.encode("latin-1")
14
+
15
+
16
+ class MultiByteStrDecoder:
17
+ """
18
+ Reference:
19
+ https://docs.python.org/3/library/codecs.html
20
+ """
21
+
22
+ __CODECS = [
23
+ "utf_7",
24
+ "utf_8",
25
+ "utf_8_sig",
26
+ "utf_16",
27
+ "utf_16_be",
28
+ "utf_16_le",
29
+ "utf_32",
30
+ "utf_32_be",
31
+ "utf_32_le",
32
+ "big5",
33
+ "big5hkscs",
34
+ "cp037",
35
+ "cp424",
36
+ "cp437",
37
+ "cp500",
38
+ "cp720",
39
+ "cp737",
40
+ "cp775",
41
+ "cp850",
42
+ "cp852",
43
+ "cp855",
44
+ "cp856",
45
+ "cp857",
46
+ "cp858",
47
+ "cp860",
48
+ "cp861",
49
+ "cp862",
50
+ "cp863",
51
+ "cp864",
52
+ "cp865",
53
+ "cp866",
54
+ "cp869",
55
+ "cp874",
56
+ "cp875",
57
+ "cp932",
58
+ "cp949",
59
+ "cp950",
60
+ "cp1006",
61
+ "cp1026",
62
+ "cp1140",
63
+ "cp1250",
64
+ "cp1251",
65
+ "cp1252",
66
+ "cp1253",
67
+ "cp1254",
68
+ "cp1255",
69
+ "cp1256",
70
+ "cp1257",
71
+ "cp1258",
72
+ "euc_jp",
73
+ "euc_jis_2004",
74
+ "euc_jisx0213",
75
+ "euc_kr",
76
+ "gb2312",
77
+ "gbk",
78
+ "gb18030",
79
+ "hz",
80
+ "iso2022_jp",
81
+ "iso2022_jp_1",
82
+ "iso2022_jp_2",
83
+ "iso2022_jp_2004",
84
+ "iso2022_jp_3",
85
+ "iso2022_jp_ext",
86
+ "iso2022_kr",
87
+ "latin_1",
88
+ "iso8859_2",
89
+ "iso8859_3",
90
+ "iso8859_4",
91
+ "iso8859_5",
92
+ "iso8859_6",
93
+ "iso8859_7",
94
+ "iso8859_8",
95
+ "iso8859_9",
96
+ "iso8859_10",
97
+ "iso8859_11",
98
+ "iso8859_13",
99
+ "iso8859_14",
100
+ "iso8859_15",
101
+ "iso8859_16",
102
+ "johab",
103
+ "koi8_r",
104
+ "koi8_u",
105
+ "mac_cyrillic",
106
+ "mac_greek",
107
+ "mac_iceland",
108
+ "mac_latin2",
109
+ "mac_roman",
110
+ "mac_turkish",
111
+ "ptcp154",
112
+ "shift_jis",
113
+ "shift_jis_2004",
114
+ "shift_jisx0213",
115
+ "base64_codec",
116
+ "bz2_codec",
117
+ "hex_codec",
118
+ "idna",
119
+ "mbcs",
120
+ "palmos",
121
+ "punycode",
122
+ "quopri_codec",
123
+ "raw_unicode_escape",
124
+ "rot_13",
125
+ "string_escape",
126
+ "unicode_escape",
127
+ "unicode_internal",
128
+ "uu_codec",
129
+ "zlib_codec",
130
+ ]
131
+ __RE_UTF7 = re.compile(b("[+].*?[-]"))
132
+
133
+ @property
134
+ def unicode_str(self) -> str:
135
+ return self.__unicode_str
136
+
137
+ @property
138
+ def codec(self) -> Optional[str]:
139
+ return self.__codec
140
+
141
+ def __init__(self, value, codec_candidates: Optional[Sequence[str]] = None) -> None:
142
+ self.__encoded_str = value
143
+ self.__codec: Optional[str] = None
144
+ if codec_candidates is None:
145
+ self.__codec_candidate_list: List[str] = []
146
+ else:
147
+ self.__codec_candidate_list = list(codec_candidates)
148
+
149
+ self.__validate_str()
150
+
151
+ self.__unicode_str = self.__to_unicode()
152
+
153
+ def __repr__(self) -> str:
154
+ return f"codec={self.codec:s}, unicode={self.unicode_str:s}"
155
+
156
+ def __validate_str(self) -> None:
157
+ if isinstance(self.__encoded_str, (str, bytes)):
158
+ return
159
+
160
+ raise ValueError(f"value must be a string: actual={type(self.__encoded_str)}")
161
+
162
+ def __is_buffer(self) -> bool:
163
+ return isinstance(self.__encoded_str, memoryview)
164
+
165
+ def __is_multibyte_utf7(self, encoded_str) -> bool:
166
+ if self.__codec != "utf_7":
167
+ return False
168
+
169
+ utf7_symbol_count = encoded_str.count(b("+"))
170
+ if utf7_symbol_count <= 0:
171
+ return False
172
+
173
+ if utf7_symbol_count != encoded_str.count(b("-")):
174
+ return False
175
+
176
+ return utf7_symbol_count == len(self.__RE_UTF7.findall(encoded_str))
177
+
178
+ def __get_encoded_str(self) -> str:
179
+ if self.__is_buffer():
180
+ return str(self.__encoded_str)
181
+
182
+ return self.__encoded_str
183
+
184
+ @staticmethod
185
+ def __detect_encoding_helper(encoded_str) -> Optional[str]:
186
+ import chardet
187
+
188
+ try:
189
+ detect = chardet.detect(encoded_str)
190
+ except TypeError:
191
+ detect = {} # type: ignore
192
+
193
+ detect_encoding = detect.get("encoding")
194
+ confidence = detect.get("confidence")
195
+
196
+ if detect_encoding not in ["ascii", "utf-8"] and confidence and confidence > 0.7:
197
+ # utf7 tend to be misrecognized as ascii
198
+ return detect_encoding
199
+
200
+ return None
201
+
202
+ def __get_codec_candidate_list(self, encoded_str) -> List[str]:
203
+ codec_candidate_list = copy.deepcopy(self.__CODECS)
204
+ detect_encoding = self.__detect_encoding_helper(encoded_str)
205
+
206
+ if detect_encoding:
207
+ try:
208
+ codec_candidate_list.remove(detect_encoding)
209
+ except ValueError:
210
+ pass
211
+
212
+ codec_candidate_list.insert(0, detect_encoding)
213
+
214
+ for codec_candidate in self.__codec_candidate_list:
215
+ try:
216
+ codec_candidate_list.remove(codec_candidate)
217
+ except ValueError:
218
+ pass
219
+
220
+ return self.__codec_candidate_list + codec_candidate_list
221
+
222
+ def __to_unicode(self):
223
+ encoded_str = self.__get_encoded_str()
224
+
225
+ if encoded_str == b"":
226
+ self.__codec = "unicode"
227
+ return ""
228
+
229
+ for codec in self.__get_codec_candidate_list(encoded_str):
230
+ if not codec:
231
+ continue
232
+
233
+ try:
234
+ self.__codec = to_codec_name(codec)
235
+ decoded_str = encoded_str.decode(codec)
236
+ break
237
+ except UnicodeDecodeError:
238
+ self.__codec = None
239
+ continue
240
+ except AttributeError:
241
+ if isinstance(encoded_str, str):
242
+ # already a unicode string (python 3)
243
+ self.__codec = "unicode"
244
+
245
+ if not encoded_str:
246
+ return encoded_str
247
+
248
+ return encoded_str
249
+
250
+ self.__codec = None
251
+
252
+ try:
253
+ return f"{encoded_str}"
254
+ except UnicodeDecodeError:
255
+ # some of the objects that cannot convertible to a string
256
+ # may reach this line
257
+ raise TypeError("argument must be a string")
258
+ else:
259
+ self.__codec = None
260
+
261
+ try:
262
+ message = f"unknown codec: encoded_str={encoded_str}"
263
+ except UnicodeDecodeError:
264
+ message = f"unknown codec: value-type={type(encoded_str)}"
265
+
266
+ raise UnicodeDecodeError(message)
267
+
268
+ if self.codec == "utf_7":
269
+ return self.__process_utf7(encoded_str, decoded_str)
270
+
271
+ return decoded_str
272
+
273
+ def __process_utf7(self, encoded_str, decoded_str) -> str:
274
+ if not encoded_str:
275
+ self.__codec = "unicode"
276
+
277
+ return encoded_str
278
+
279
+ if self.__is_multibyte_utf7(encoded_str):
280
+ try:
281
+ decoded_str.encode("ascii")
282
+
283
+ self.__codec = "ascii"
284
+
285
+ return encoded_str.decode("ascii")
286
+ except UnicodeEncodeError:
287
+ return decoded_str
288
+
289
+ self.__codec = "ascii"
290
+
291
+ return encoded_str.decode("ascii")
env-llmeval/lib/python3.10/site-packages/mbstrdecoder/py.typed ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/peft/__init__.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all.
4
+
5
+ # coding=utf-8
6
+ # Copyright 2023-present the HuggingFace Inc. team.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ __version__ = "0.10.0"
21
+
22
+ from .auto import (
23
+ AutoPeftModel,
24
+ AutoPeftModelForCausalLM,
25
+ AutoPeftModelForSequenceClassification,
26
+ AutoPeftModelForSeq2SeqLM,
27
+ AutoPeftModelForTokenClassification,
28
+ AutoPeftModelForQuestionAnswering,
29
+ AutoPeftModelForFeatureExtraction,
30
+ )
31
+ from .mapping import (
32
+ MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
33
+ PEFT_TYPE_TO_CONFIG_MAPPING,
34
+ get_peft_config,
35
+ get_peft_model,
36
+ inject_adapter_in_model,
37
+ )
38
+ from .mixed_model import PeftMixedModel
39
+ from .peft_model import (
40
+ PeftModel,
41
+ PeftModelForCausalLM,
42
+ PeftModelForSeq2SeqLM,
43
+ PeftModelForSequenceClassification,
44
+ PeftModelForTokenClassification,
45
+ PeftModelForQuestionAnswering,
46
+ PeftModelForFeatureExtraction,
47
+ )
48
+ from .tuners import (
49
+ AdaptionPromptConfig,
50
+ AdaptionPromptModel,
51
+ LoraConfig,
52
+ LoftQConfig,
53
+ LoraModel,
54
+ LoHaConfig,
55
+ LoHaModel,
56
+ LoKrConfig,
57
+ LoKrModel,
58
+ IA3Config,
59
+ IA3Model,
60
+ AdaLoraConfig,
61
+ AdaLoraModel,
62
+ PrefixEncoder,
63
+ PrefixTuningConfig,
64
+ PromptEmbedding,
65
+ PromptEncoder,
66
+ PromptEncoderConfig,
67
+ PromptEncoderReparameterizationType,
68
+ PromptTuningConfig,
69
+ PromptTuningInit,
70
+ MultitaskPromptTuningConfig,
71
+ MultitaskPromptTuningInit,
72
+ OFTConfig,
73
+ OFTModel,
74
+ PolyConfig,
75
+ PolyModel,
76
+ )
77
+ from .utils import (
78
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
79
+ PeftType,
80
+ TaskType,
81
+ bloom_model_postprocess_past_key_value,
82
+ get_peft_model_state_dict,
83
+ prepare_model_for_kbit_training,
84
+ replace_lora_weights_loftq,
85
+ set_peft_model_state_dict,
86
+ shift_tokens_right,
87
+ load_peft_weights,
88
+ cast_mixed_precision_params,
89
+ )
90
+ from .config import PeftConfig, PromptLearningConfig
env-llmeval/lib/python3.10/site-packages/peft/auto.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import importlib
18
+ import os
19
+ from typing import Optional
20
+
21
+ from transformers import (
22
+ AutoModel,
23
+ AutoModelForCausalLM,
24
+ AutoModelForQuestionAnswering,
25
+ AutoModelForSeq2SeqLM,
26
+ AutoModelForSequenceClassification,
27
+ AutoModelForTokenClassification,
28
+ AutoTokenizer,
29
+ )
30
+
31
+ from .config import PeftConfig
32
+ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
33
+ from .peft_model import (
34
+ PeftModel,
35
+ PeftModelForCausalLM,
36
+ PeftModelForFeatureExtraction,
37
+ PeftModelForQuestionAnswering,
38
+ PeftModelForSeq2SeqLM,
39
+ PeftModelForSequenceClassification,
40
+ PeftModelForTokenClassification,
41
+ )
42
+ from .utils.constants import TOKENIZER_CONFIG_NAME
43
+ from .utils.other import check_file_exists_on_hf_hub
44
+
45
+
46
+ class _BaseAutoPeftModel:
47
+ _target_class = None
48
+ _target_peft_class = None
49
+
50
+ def __init__(self, *args, **kwargs):
51
+ # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400
52
+ raise EnvironmentError( # noqa: UP024
53
+ f"{self.__class__.__name__} is designed to be instantiated "
54
+ f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
55
+ f"`{self.__class__.__name__}.from_config(config)` methods."
56
+ )
57
+
58
+ @classmethod
59
+ def from_pretrained(
60
+ cls,
61
+ pretrained_model_name_or_path,
62
+ adapter_name: str = "default",
63
+ is_trainable: bool = False,
64
+ config: Optional[PeftConfig] = None,
65
+ **kwargs,
66
+ ):
67
+ r"""
68
+ A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs
69
+ are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and
70
+ the config object init.
71
+ """
72
+ peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
73
+ base_model_path = peft_config.base_model_name_or_path
74
+
75
+ task_type = getattr(peft_config, "task_type", None)
76
+
77
+ if cls._target_class is not None:
78
+ target_class = cls._target_class
79
+ elif cls._target_class is None and task_type is not None:
80
+ # this is only in the case where we use `AutoPeftModel`
81
+ raise ValueError(
82
+ "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)"
83
+ )
84
+
85
+ if task_type is not None:
86
+ expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type]
87
+ if cls._target_peft_class.__name__ != expected_target_class.__name__:
88
+ raise ValueError(
89
+ f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }"
90
+ " make sure that you are loading the correct model for your task type."
91
+ )
92
+ elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None:
93
+ auto_mapping = getattr(peft_config, "auto_mapping", None)
94
+ base_model_class = auto_mapping["base_model_class"]
95
+ parent_library_name = auto_mapping["parent_library"]
96
+
97
+ parent_library = importlib.import_module(parent_library_name)
98
+ target_class = getattr(parent_library, base_model_class)
99
+ else:
100
+ raise ValueError(
101
+ "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type."
102
+ )
103
+
104
+ base_model = target_class.from_pretrained(base_model_path, **kwargs)
105
+
106
+ tokenizer_exists = False
107
+ if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)):
108
+ tokenizer_exists = True
109
+ else:
110
+ token = kwargs.get("token", None)
111
+ if token is None:
112
+ token = kwargs.get("use_auth_token", None)
113
+
114
+ tokenizer_exists = check_file_exists_on_hf_hub(
115
+ repo_id=pretrained_model_name_or_path,
116
+ filename=TOKENIZER_CONFIG_NAME,
117
+ revision=kwargs.get("revision", None),
118
+ repo_type=kwargs.get("repo_type", None),
119
+ token=token,
120
+ )
121
+
122
+ if tokenizer_exists:
123
+ tokenizer = AutoTokenizer.from_pretrained(
124
+ pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False)
125
+ )
126
+ base_model.resize_token_embeddings(len(tokenizer))
127
+
128
+ return cls._target_peft_class.from_pretrained(
129
+ base_model,
130
+ pretrained_model_name_or_path,
131
+ adapter_name=adapter_name,
132
+ is_trainable=is_trainable,
133
+ config=config,
134
+ **kwargs,
135
+ )
136
+
137
+
138
+ class AutoPeftModel(_BaseAutoPeftModel):
139
+ _target_class = None
140
+ _target_peft_class = PeftModel
141
+
142
+
143
+ class AutoPeftModelForCausalLM(_BaseAutoPeftModel):
144
+ _target_class = AutoModelForCausalLM
145
+ _target_peft_class = PeftModelForCausalLM
146
+
147
+
148
+ class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel):
149
+ _target_class = AutoModelForSeq2SeqLM
150
+ _target_peft_class = PeftModelForSeq2SeqLM
151
+
152
+
153
+ class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel):
154
+ _target_class = AutoModelForSequenceClassification
155
+ _target_peft_class = PeftModelForSequenceClassification
156
+
157
+
158
+ class AutoPeftModelForTokenClassification(_BaseAutoPeftModel):
159
+ _target_class = AutoModelForTokenClassification
160
+ _target_peft_class = PeftModelForTokenClassification
161
+
162
+
163
+ class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel):
164
+ _target_class = AutoModelForQuestionAnswering
165
+ _target_peft_class = PeftModelForQuestionAnswering
166
+
167
+
168
+ class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel):
169
+ _target_class = AutoModel
170
+ _target_peft_class = PeftModelForFeatureExtraction
env-llmeval/lib/python3.10/site-packages/peft/config.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import json
16
+ import os
17
+ from dataclasses import asdict, dataclass, field
18
+ from typing import Dict, Optional, Union
19
+
20
+ from huggingface_hub import hf_hub_download
21
+ from transformers.utils import PushToHubMixin
22
+
23
+ from .utils import CONFIG_NAME, PeftType, TaskType
24
+
25
+
26
+ @dataclass
27
+ class PeftConfigMixin(PushToHubMixin):
28
+ r"""
29
+ This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all
30
+ PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to
31
+ push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a
32
+ directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.
33
+
34
+ Args:
35
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
36
+ """
37
+
38
+ peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."})
39
+ auto_mapping: Optional[dict] = field(
40
+ default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."}
41
+ )
42
+
43
+ def to_dict(self) -> Dict:
44
+ r"""
45
+ Returns the configuration for your adapter model as a dictionary.
46
+ """
47
+ return asdict(self)
48
+
49
+ def save_pretrained(self, save_directory: str, **kwargs) -> None:
50
+ r"""
51
+ This method saves the configuration of your adapter model in a directory.
52
+
53
+ Args:
54
+ save_directory (`str`):
55
+ The directory where the configuration will be saved.
56
+ kwargs (additional keyword arguments, *optional*):
57
+ Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]
58
+ method.
59
+ """
60
+ if os.path.isfile(save_directory):
61
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
62
+
63
+ os.makedirs(save_directory, exist_ok=True)
64
+ auto_mapping_dict = kwargs.pop("auto_mapping_dict", None)
65
+
66
+ output_dict = asdict(self)
67
+ # converting set type to list
68
+ for key, value in output_dict.items():
69
+ if isinstance(value, set):
70
+ output_dict[key] = list(value)
71
+
72
+ output_path = os.path.join(save_directory, CONFIG_NAME)
73
+
74
+ # Add auto mapping details for custom models.
75
+ if auto_mapping_dict is not None:
76
+ output_dict["auto_mapping"] = auto_mapping_dict
77
+
78
+ # save it
79
+ with open(output_path, "w") as writer:
80
+ writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
81
+
82
+ @classmethod
83
+ def from_peft_type(cls, **kwargs):
84
+ r"""
85
+ This method loads the configuration of your adapter model from a set of kwargs.
86
+
87
+ The appropriate configuration type is determined by the `peft_type` argument. If `peft_type` is not provided,
88
+ the calling class type is instantiated.
89
+
90
+ Args:
91
+ kwargs (configuration keyword arguments):
92
+ Keyword arguments passed along to the configuration initialization.
93
+ """
94
+ # Avoid circular dependency .. TODO: fix this with a larger refactor
95
+ from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING
96
+
97
+ # TODO: this hack is needed to fix the following issue (on commit 702f937):
98
+ # if someone saves a default config and loads it back with `PeftConfig` class it yields to
99
+ # not loading the correct config class.
100
+
101
+ # from peft import AdaLoraConfig, PeftConfig
102
+ # peft_config = AdaLoraConfig()
103
+ # print(peft_config)
104
+ # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None,
105
+ # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ...
106
+ #
107
+ # peft_config.save_pretrained("./test_config")
108
+ # peft_config = PeftConfig.from_pretrained("./test_config")
109
+ # print(peft_config)
110
+ # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False)
111
+
112
+ if "peft_type" in kwargs:
113
+ peft_type = kwargs["peft_type"]
114
+ config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]
115
+ else:
116
+ config_cls = cls
117
+
118
+ return config_cls(**kwargs)
119
+
120
+ @classmethod
121
+ def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs):
122
+ r"""
123
+ This method loads the configuration of your adapter model from a directory.
124
+
125
+ Args:
126
+ pretrained_model_name_or_path (`str`):
127
+ The directory or the Hub repository id where the configuration is saved.
128
+ kwargs (additional keyword arguments, *optional*):
129
+ Additional keyword arguments passed along to the child class initialization.
130
+ """
131
+ path = (
132
+ os.path.join(pretrained_model_name_or_path, subfolder)
133
+ if subfolder is not None
134
+ else pretrained_model_name_or_path
135
+ )
136
+
137
+ hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs)
138
+
139
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
140
+ config_file = os.path.join(path, CONFIG_NAME)
141
+ else:
142
+ try:
143
+ config_file = hf_hub_download(
144
+ pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs
145
+ )
146
+ except Exception:
147
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'")
148
+
149
+ loaded_attributes = cls.from_json_file(config_file)
150
+ kwargs = {**class_kwargs, **loaded_attributes}
151
+ return cls.from_peft_type(**kwargs)
152
+
153
+ @classmethod
154
+ def from_json_file(cls, path_json_file: str, **kwargs):
155
+ r"""
156
+ Loads a configuration file from a json file.
157
+
158
+ Args:
159
+ path_json_file (`str`):
160
+ The path to the json file.
161
+ """
162
+ with open(path_json_file) as file:
163
+ json_object = json.load(file)
164
+
165
+ return json_object
166
+
167
+ @classmethod
168
+ def _split_kwargs(cls, kwargs):
169
+ hf_hub_download_kwargs = {}
170
+ class_kwargs = {}
171
+ other_kwargs = {}
172
+
173
+ for key, value in kwargs.items():
174
+ if key in inspect.signature(hf_hub_download).parameters:
175
+ hf_hub_download_kwargs[key] = value
176
+ elif key in list(cls.__annotations__):
177
+ class_kwargs[key] = value
178
+ else:
179
+ other_kwargs[key] = value
180
+
181
+ return hf_hub_download_kwargs, class_kwargs, other_kwargs
182
+
183
+ @classmethod
184
+ def _get_peft_type(
185
+ cls,
186
+ model_id: str,
187
+ **hf_hub_download_kwargs,
188
+ ):
189
+ subfolder = hf_hub_download_kwargs.get("subfolder", None)
190
+
191
+ path = os.path.join(model_id, subfolder) if subfolder is not None else model_id
192
+
193
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
194
+ config_file = os.path.join(path, CONFIG_NAME)
195
+ else:
196
+ try:
197
+ config_file = hf_hub_download(
198
+ model_id,
199
+ CONFIG_NAME,
200
+ **hf_hub_download_kwargs,
201
+ )
202
+ except Exception:
203
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'")
204
+
205
+ loaded_attributes = cls.from_json_file(config_file)
206
+ return loaded_attributes["peft_type"]
207
+
208
+ @property
209
+ def is_prompt_learning(self) -> bool:
210
+ r"""
211
+ Utility method to check if the configuration is for prompt learning.
212
+ """
213
+ return False
214
+
215
+ @property
216
+ def is_adaption_prompt(self) -> bool:
217
+ """Return True if this is an adaption prompt config."""
218
+ return False
219
+
220
+
221
+ @dataclass
222
+ class PeftConfig(PeftConfigMixin):
223
+ """
224
+ This is the base configuration class to store the configuration of a [`PeftModel`].
225
+
226
+ Args:
227
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
228
+ task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.
229
+ inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.
230
+ """
231
+
232
+ base_model_name_or_path: Optional[str] = field(
233
+ default=None, metadata={"help": "The name of the base model to use."}
234
+ )
235
+ revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."})
236
+ peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"})
237
+ task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"})
238
+ inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"})
239
+
240
+
241
+ @dataclass
242
+ class PromptLearningConfig(PeftConfig):
243
+ """
244
+ This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or
245
+ [`PromptTuning`].
246
+
247
+ Args:
248
+ num_virtual_tokens (`int`): The number of virtual tokens to use.
249
+ token_dim (`int`): The hidden embedding dimension of the base transformer model.
250
+ num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.
251
+ num_attention_heads (`int`): The number of attention heads in the base transformer model.
252
+ num_layers (`int`): The number of layers in the base transformer model.
253
+ """
254
+
255
+ num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"})
256
+ token_dim: int = field(
257
+ default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"}
258
+ )
259
+ num_transformer_submodules: Optional[int] = field(
260
+ default=None, metadata={"help": "Number of transformer submodules"}
261
+ )
262
+ num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"})
263
+ num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"})
264
+
265
+ @property
266
+ def is_prompt_learning(self) -> bool:
267
+ r"""
268
+ Utility method to check if the configuration is for prompt learning.
269
+ """
270
+ return True
env-llmeval/lib/python3.10/site-packages/peft/helpers.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ from copy import deepcopy
3
+ from functools import update_wrapper
4
+ from types import MethodType
5
+
6
+ from .peft_model import PeftModel
7
+
8
+
9
+ def update_forward_signature(model: PeftModel) -> None:
10
+ """
11
+ Args:
12
+ Updates the forward signature of the PeftModel to include parents class signature
13
+ model (`PeftModel`): Peft model to update the forward signature
14
+ Example:
15
+
16
+ ```python
17
+ >>> from transformers import WhisperForConditionalGeneration
18
+ >>> from peft import get_peft_model, LoraConfig, update_forward_signature
19
+
20
+ >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
21
+ >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"])
22
+
23
+ >>> peft_model = get_peft_model(model, peft_config)
24
+ >>> update_forward_signature(peft_model)
25
+ ```
26
+ """
27
+
28
+ # Only update signature when the current forward signature only has *args and **kwargs
29
+ current_signature = inspect.signature(model.forward)
30
+ if (
31
+ len(current_signature.parameters) == 2
32
+ and "args" in current_signature.parameters
33
+ and "kwargs" in current_signature.parameters
34
+ ):
35
+ forward = deepcopy(model.forward.__func__)
36
+ update_wrapper(
37
+ forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__")
38
+ )
39
+ model.forward = MethodType(forward, model)
40
+
41
+
42
+ def update_generate_signature(model: PeftModel) -> None:
43
+ """
44
+ Args:
45
+ Updates the generate signature of a PeftModel with overriding generate to include parents class signature
46
+ model (`PeftModel`): Peft model to update the generate signature
47
+ Example:
48
+
49
+ ```python
50
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
51
+ >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature
52
+
53
+ >>> model_name_or_path = "bigscience/mt0-large"
54
+ >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
55
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
56
+
57
+ >>> peft_config = LoraConfig(
58
+ ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
59
+ ... )
60
+ >>> peft_model = get_peft_model(model, peft_config)
61
+ >>> update_generate_signature(peft_model)
62
+ >>> help(peft_model.generate)
63
+ ```
64
+ """
65
+ if not hasattr(model, "generate"):
66
+ return
67
+ current_signature = inspect.signature(model.generate)
68
+ if (
69
+ len(current_signature.parameters) == 2
70
+ and "args" in current_signature.parameters
71
+ and "kwargs" in current_signature.parameters
72
+ ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters):
73
+ generate = deepcopy(model.generate.__func__)
74
+ update_wrapper(
75
+ generate,
76
+ type(model.get_base_model()).generate,
77
+ assigned=("__doc__", "__name__", "__annotations__"),
78
+ )
79
+ model.generate = MethodType(generate, model)
80
+
81
+
82
+ def update_signature(model: PeftModel, method: str = "all") -> None:
83
+ """
84
+ Args:
85
+ Updates the signature of a PeftModel include parents class signature for forward or generate method
86
+ model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update
87
+ signature choose one of "forward", "generate", "all"
88
+ Example:
89
+ ```python
90
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
91
+ >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature
92
+
93
+ >>> model_name_or_path = "bigscience/mt0-large"
94
+ >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
95
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
96
+
97
+ >>> peft_config = LoraConfig(
98
+ ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
99
+ ... )
100
+ >>> peft_model = get_peft_model(model, peft_config)
101
+ >>> update_signature(peft_model)
102
+ >>> help(peft_model.generate)
103
+ ```
104
+ """
105
+ if method == "forward":
106
+ update_forward_signature(model)
107
+ elif method == "generate":
108
+ update_generate_signature(model)
109
+ elif method == "all":
110
+ update_forward_signature(model)
111
+ update_generate_signature(model)
112
+ else:
113
+ raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
env-llmeval/lib/python3.10/site-packages/peft/import_utils.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib
15
+ import importlib.metadata as importlib_metadata
16
+ from functools import lru_cache
17
+
18
+ import packaging.version
19
+
20
+
21
+ def is_bnb_available() -> bool:
22
+ return importlib.util.find_spec("bitsandbytes") is not None
23
+
24
+
25
+ def is_bnb_4bit_available() -> bool:
26
+ if not is_bnb_available():
27
+ return False
28
+
29
+ import bitsandbytes as bnb
30
+
31
+ return hasattr(bnb.nn, "Linear4bit")
32
+
33
+
34
+ def is_auto_gptq_available():
35
+ if importlib.util.find_spec("auto_gptq") is not None:
36
+ AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0")
37
+ version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq"))
38
+ if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq:
39
+ return True
40
+ else:
41
+ raise ImportError(
42
+ f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, "
43
+ f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported"
44
+ )
45
+
46
+
47
+ def is_optimum_available() -> bool:
48
+ return importlib.util.find_spec("optimum") is not None
49
+
50
+
51
+ @lru_cache
52
+ def is_torch_tpu_available(check_device=True):
53
+ "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
54
+ if importlib.util.find_spec("torch_xla") is not None:
55
+ if check_device:
56
+ # We need to check if `xla_device` can be found, will raise a RuntimeError if not
57
+ try:
58
+ import torch_xla.core.xla_model as xm
59
+
60
+ _ = xm.xla_device()
61
+ return True
62
+ except RuntimeError:
63
+ return False
64
+ return True
65
+ return False
66
+
67
+
68
+ def is_aqlm_available():
69
+ return importlib.util.find_spec("aqlm") is not None
70
+
71
+
72
+ def is_auto_awq_available():
73
+ return importlib.util.find_spec("awq") is not None
env-llmeval/lib/python3.10/site-packages/peft/mapping.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ from typing import TYPE_CHECKING, Any
18
+
19
+ import torch
20
+
21
+ from .config import PeftConfig
22
+ from .mixed_model import PeftMixedModel
23
+ from .peft_model import (
24
+ PeftModel,
25
+ PeftModelForCausalLM,
26
+ PeftModelForFeatureExtraction,
27
+ PeftModelForQuestionAnswering,
28
+ PeftModelForSeq2SeqLM,
29
+ PeftModelForSequenceClassification,
30
+ PeftModelForTokenClassification,
31
+ )
32
+ from .tuners import (
33
+ AdaLoraConfig,
34
+ AdaLoraModel,
35
+ AdaptionPromptConfig,
36
+ IA3Config,
37
+ IA3Model,
38
+ LoHaConfig,
39
+ LoHaModel,
40
+ LoKrConfig,
41
+ LoKrModel,
42
+ LoraConfig,
43
+ LoraModel,
44
+ MultitaskPromptTuningConfig,
45
+ OFTConfig,
46
+ OFTModel,
47
+ PolyConfig,
48
+ PolyModel,
49
+ PrefixTuningConfig,
50
+ PromptEncoderConfig,
51
+ PromptTuningConfig,
52
+ )
53
+ from .utils import _prepare_prompt_learning_config
54
+
55
+
56
+ if TYPE_CHECKING:
57
+ from transformers import PreTrainedModel
58
+
59
+
60
+ MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, PeftModel] = {
61
+ "SEQ_CLS": PeftModelForSequenceClassification,
62
+ "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM,
63
+ "CAUSAL_LM": PeftModelForCausalLM,
64
+ "TOKEN_CLS": PeftModelForTokenClassification,
65
+ "QUESTION_ANS": PeftModelForQuestionAnswering,
66
+ "FEATURE_EXTRACTION": PeftModelForFeatureExtraction,
67
+ }
68
+
69
+ PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, PeftConfig] = {
70
+ "ADAPTION_PROMPT": AdaptionPromptConfig,
71
+ "PROMPT_TUNING": PromptTuningConfig,
72
+ "PREFIX_TUNING": PrefixTuningConfig,
73
+ "P_TUNING": PromptEncoderConfig,
74
+ "LORA": LoraConfig,
75
+ "LOHA": LoHaConfig,
76
+ "LOKR": LoKrConfig,
77
+ "ADALORA": AdaLoraConfig,
78
+ "IA3": IA3Config,
79
+ "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig,
80
+ "OFT": OFTConfig,
81
+ "POLY": PolyConfig,
82
+ }
83
+
84
+ PEFT_TYPE_TO_TUNER_MAPPING = {
85
+ "LORA": LoraModel,
86
+ "LOHA": LoHaModel,
87
+ "LOKR": LoKrModel,
88
+ "ADALORA": AdaLoraModel,
89
+ "IA3": IA3Model,
90
+ "OFT": OFTModel,
91
+ "POLY": PolyModel,
92
+ }
93
+
94
+
95
+ def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig:
96
+ """
97
+ Returns a Peft config object from a dictionary.
98
+
99
+ Args:
100
+ config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters.
101
+ """
102
+
103
+ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict)
104
+
105
+
106
+ def get_peft_model(
107
+ model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False
108
+ ) -> PeftModel | PeftMixedModel:
109
+ """
110
+ Returns a Peft model object from a model and a config.
111
+
112
+ Args:
113
+ model ([`transformers.PreTrainedModel`]):
114
+ Model to be wrapped.
115
+ peft_config ([`PeftConfig`]):
116
+ Configuration object containing the parameters of the Peft model.
117
+ adapter_name (`str`, `optional`, defaults to `"default"`):
118
+ The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
119
+ mixed (`bool`, `optional`, defaults to `False`):
120
+ Whether to allow mixing different (compatible) adapter types.
121
+ """
122
+ model_config = getattr(model, "config", {"model_type": "custom"})
123
+ if hasattr(model_config, "to_dict"):
124
+ model_config = model_config.to_dict()
125
+
126
+ peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
127
+
128
+ if mixed:
129
+ return PeftMixedModel(model, peft_config, adapter_name=adapter_name)
130
+
131
+ if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
132
+ return PeftModel(model, peft_config, adapter_name=adapter_name)
133
+
134
+ if peft_config.is_prompt_learning:
135
+ peft_config = _prepare_prompt_learning_config(peft_config, model_config)
136
+ return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)
137
+
138
+
139
+ def inject_adapter_in_model(
140
+ peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default"
141
+ ) -> torch.nn.Module:
142
+ r"""
143
+ A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning
144
+ methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API
145
+ calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods.
146
+
147
+ Args:
148
+ peft_config (`PeftConfig`):
149
+ Configuration object containing the parameters of the Peft model.
150
+ model (`torch.nn.Module`):
151
+ The input model where the adapter will be injected.
152
+ adapter_name (`str`, `optional`, defaults to `"default"`):
153
+ The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
154
+ """
155
+ if peft_config.is_prompt_learning or peft_config.is_adaption_prompt:
156
+ raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.")
157
+
158
+ if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys():
159
+ raise ValueError(
160
+ f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`."
161
+ )
162
+
163
+ tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type]
164
+
165
+ # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules.
166
+ peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name)
167
+
168
+ return peft_model.model
env-llmeval/lib/python3.10/site-packages/peft/mixed_model.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import os
18
+ from contextlib import contextmanager
19
+ from typing import Any, Optional, Union
20
+
21
+ import torch
22
+ from accelerate.hooks import remove_hook_from_submodules
23
+ from torch import nn
24
+ from transformers.utils import PushToHubMixin
25
+
26
+ from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES
27
+
28
+ from .config import PeftConfig
29
+ from .peft_model import PeftModel
30
+ from .tuners import (
31
+ AdaLoraModel,
32
+ IA3Model,
33
+ LoHaModel,
34
+ LoKrModel,
35
+ LoraModel,
36
+ MixedModel,
37
+ OFTModel,
38
+ )
39
+ from .utils import PeftType, _set_adapter, _set_trainable
40
+
41
+
42
+ PEFT_TYPE_TO_MODEL_MAPPING = {
43
+ PeftType.LORA: LoraModel,
44
+ PeftType.LOHA: LoHaModel,
45
+ PeftType.LOKR: LoKrModel,
46
+ PeftType.ADALORA: AdaLoraModel,
47
+ PeftType.IA3: IA3Model,
48
+ PeftType.OFT: OFTModel,
49
+ }
50
+
51
+
52
+ def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None:
53
+ r"""
54
+ Prepares the model for gradient checkpointing if necessary
55
+ """
56
+ # Note: same as PeftModel._prepare_model_for_gradient_checkpointing
57
+ if not getattr(model, "is_gradient_checkpointing", True):
58
+ return model
59
+
60
+ if not (
61
+ getattr(model, "is_loaded_in_8bit", False)
62
+ or getattr(model, "is_loaded_in_4bit", False)
63
+ or getattr(model, "is_quantized", False)
64
+ ):
65
+ if hasattr(model, "enable_input_require_grads"):
66
+ model.enable_input_require_grads()
67
+ elif hasattr(model, "get_input_embeddings"):
68
+
69
+ def make_inputs_require_grad(module, input, output):
70
+ output.requires_grad_(True)
71
+
72
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
73
+
74
+
75
+ def _check_config_compatible(peft_config: PeftConfig) -> None:
76
+ if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES:
77
+ raise ValueError(
78
+ f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. "
79
+ f"Compatible types are: {COMPATIBLE_TUNER_TYPES}"
80
+ )
81
+
82
+
83
+ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
84
+ """
85
+ PeftMixedModel for loading mixing different types of adapters for inference.
86
+
87
+ This class does not support loading/saving, and it shouldn't usually be initialized directly. Instead, use
88
+ `get_peft_model` with the argument `mixed=True`.
89
+
90
+ <Tip>
91
+
92
+ Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn
93
+ more about using different adapter types.
94
+
95
+ </Tip>
96
+
97
+ Example:
98
+
99
+ ```py
100
+ >>> from peft import get_peft_model
101
+
102
+ >>> base_model = ... # load the base model, e.g. from transformers
103
+ >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval()
104
+ >>> peft_model.load_adapter(path_to_adapter2, "adapter2")
105
+ >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters
106
+ >>> peft_model(data) # forward pass using both adapters
107
+ ```
108
+
109
+ Args:
110
+ model (`torch.nn.Module`):
111
+ The model to be tuned.
112
+ config (`PeftConfig`):
113
+ The config of the model to be tuned. The adapter type must be compatible.
114
+ adapter_name (`str`, `optional`, defaults to `"default"`):
115
+ The name of the first adapter.
116
+ """
117
+
118
+ def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
119
+ super().__init__()
120
+ _check_config_compatible(peft_config)
121
+ _prepare_model_for_gradient_checkpointing(model)
122
+ self.modules_to_save = None
123
+ self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name)
124
+ self.set_modules_to_save(peft_config, adapter_name)
125
+
126
+ self.config = getattr(model, "config", {"model_type": "custom"})
127
+
128
+ # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
129
+ # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
130
+ # behavior we disable that in this line.
131
+ if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
132
+ self.base_model.config.pretraining_tp = 1
133
+
134
+ @property
135
+ def peft_config(self) -> dict[str, PeftConfig]:
136
+ return self.base_model.peft_config
137
+
138
+ @property
139
+ def active_adapter(self) -> str:
140
+ return self.base_model.active_adapter
141
+
142
+ @property
143
+ def active_adapters(self) -> list[str]:
144
+ return self.base_model.active_adapters
145
+
146
+ def get_nb_trainable_parameters(self):
147
+ r"""
148
+ Returns the number of trainable parameters and number of all parameters in the model.
149
+ """
150
+ # note: same as PeftModel.get_nb_trainable_parameters
151
+ trainable_params = 0
152
+ all_param = 0
153
+ for _, param in self.named_parameters():
154
+ num_params = param.numel()
155
+ # if using DS Zero 3 and the weights are initialized empty
156
+ if num_params == 0 and hasattr(param, "ds_numel"):
157
+ num_params = param.ds_numel
158
+
159
+ # Due to the design of 4bit linear layers from bitsandbytes
160
+ # one needs to multiply the number of parameters by 2 to get
161
+ # the correct number of parameters
162
+ if param.__class__.__name__ == "Params4bit":
163
+ num_params = num_params * 2
164
+
165
+ all_param += num_params
166
+ if param.requires_grad:
167
+ trainable_params += num_params
168
+
169
+ return trainable_params, all_param
170
+
171
+ def print_trainable_parameters(self):
172
+ """
173
+ Prints the number of trainable parameters in the model.
174
+
175
+ Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from
176
+ num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
177
+ (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model.
178
+ For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for
179
+ prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number
180
+ of trainable parameters of the backbone transformer model which can be different.
181
+ """
182
+ # note: same as PeftModel.print_trainable_parameters
183
+ trainable_params, all_param = self.get_nb_trainable_parameters()
184
+
185
+ print(
186
+ f"trainable params: {trainable_params:,d} || "
187
+ f"all params: {all_param:,d} || "
188
+ f"trainable%: {100 * trainable_params / all_param:.4f}"
189
+ )
190
+
191
+ def __getattr__(self, name: str):
192
+ """Forward missing attributes to the wrapped module."""
193
+ try:
194
+ return super().__getattr__(name) # defer to nn.Module's logic
195
+ except AttributeError:
196
+ return getattr(self.base_model, name)
197
+
198
+ def forward(self, *args: Any, **kwargs: Any):
199
+ """
200
+ Forward pass of the model.
201
+ """
202
+ return self.base_model(*args, **kwargs)
203
+
204
+ def generate(self, *args: Any, **kwargs: Any):
205
+ """
206
+ Generate output.
207
+ """
208
+ return self.base_model.generate(*args, **kwargs)
209
+
210
+ @contextmanager
211
+ def disable_adapter(self):
212
+ """
213
+ Disables the adapter module.
214
+ """
215
+ try:
216
+ self.base_model.disable_adapter_layers()
217
+ yield
218
+ finally:
219
+ self.base_model.enable_adapter_layers()
220
+
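A short sketch of the context manager above, assuming `peft_model` is an already-built `PeftMixedModel` and `inputs` is a prepared batch (both placeholders):

with peft_model.disable_adapter():
    base_output = peft_model(**inputs)  # forward pass with all adapter layers disabled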
221
+ def add_adapter(self, adapter_name: str, peft_config: PeftConfig):
222
+ _check_config_compatible(peft_config)
223
+
224
+ try:
225
+ self.peft_config[adapter_name] = peft_config
226
+ self.base_model.inject_adapter(self, adapter_name)
227
+ except Exception: # something went wrong, roll back
228
+ if adapter_name in self.peft_config:
229
+ del self.peft_config[adapter_name]
230
+ raise
231
+
232
+ self.set_modules_to_save(peft_config, adapter_name)
233
+
234
+ def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None:
235
+ if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None:
236
+ return
237
+
238
+ if self.modules_to_save is None:
239
+ self.modules_to_save = set(modules_to_save)
240
+ else:
241
+ self.modules_to_save.update(modules_to_save)
242
+ _set_trainable(self, adapter_name)
243
+
244
+ def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
245
+ """
246
+ Sets the active adapter(s) for the model.
247
+
248
+ Note that the order in which the adapters are applied during the forward pass may not be the same as the order
249
+ in which they are passed to this function. Instead, the order during the forward pass is determined by the
250
+ order in which the adapters were loaded into the model. The active adapters only determine which adapters are
251
+ active during the forward pass, but not the order in which they are applied.
252
+
253
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
254
+ not desired, use the following code.
255
+
256
+ ```py
257
+ >>> for name, param in model_peft.named_parameters():
258
+ ... if ...: # some check on name (ex. if 'lora' in name)
259
+ ... param.requires_grad = False
260
+ ```
261
+
262
+ Args:
263
+ adapter_name (`str` or `List[str]`):
264
+ The name of the adapter(s) to be activated.
265
+ """
266
+ if isinstance(adapter_name, str):
267
+ adapter_name = [adapter_name]
268
+
269
+ mismatched = set(adapter_name) - set(self.peft_config.keys())
270
+ if mismatched:
271
+ raise ValueError(
272
+ f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
273
+ )
274
+
275
+ self.base_model.set_adapter(adapter_name)
276
+ _set_adapter(self, adapter_name)
277
+
278
+ def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
279
+ if isinstance(adapter_name, str):
280
+ adapter_name = [adapter_name]
281
+
282
+ mismatched = set(adapter_name) - set(self.peft_config.keys())
283
+ if mismatched:
284
+ raise ValueError(
285
+ f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
286
+ )
287
+
288
+ self.base_model.delete_adapter(adapter_name)
289
+
290
+ def merge_and_unload(self, *args: Any, **kwargs: Any):
291
+ r"""
292
+ This method merges the adapter layers into the base model. This is needed if someone wants to use the base
293
+ model as a standalone model.
294
+
295
+ Args:
296
+ progressbar (`bool`):
297
+ whether to show a progressbar indicating the unload and merge process
298
+ safe_merge (`bool`):
299
+ whether to activate the safe merging check to check if there is any potential Nan in the adapter
300
+ weights
301
+ adapter_names (`List[str]`, *optional*):
302
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
303
+ to `None`.
304
+ """
305
+ return self.base_model.merge_and_unload(*args, **kwargs)
306
+
307
+ def unload(self, *args: Any, **kwargs: Any):
308
+ """
309
+ Gets back the base model by removing all the adapter modules without merging. This gives back the original base
310
+ model.
311
+ """
312
+ return self.base_model.unload(*args, **kwargs)
313
+
314
+ @classmethod
315
+ def _split_kwargs(cls, kwargs: dict[str, Any]):
316
+ return PeftModel._split_kwargs(kwargs)
317
+
318
+ def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any):
319
+ output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs)
320
+ # TODO: not quite clear why this is necessary but tests fail without it
321
+ self.set_adapter(self.active_adapters)
322
+ return output
323
+
324
+ def create_or_update_model_card(self, output_dir: str):
325
+ raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).")
326
+
327
+ def save_pretrained(
328
+ self,
329
+ save_directory: str,
330
+ safe_serialization: bool = False,
331
+ selected_adapters: Optional[list[str]] = None,
332
+ **kwargs: Any,
333
+ ):
334
+ raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).")
335
+
336
+ @classmethod
337
+ def from_pretrained(
338
+ cls,
339
+ model: nn.Module,
340
+ model_id: str | os.PathLike,
341
+ adapter_name: str = "default",
342
+ is_trainable: bool = False,
343
+ config: Optional[PeftConfig] = None,
344
+ **kwargs: Any,
345
+ ):
346
+ r"""
347
+ Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights.
348
+
349
+ Note that the passed `model` may be modified inplace.
350
+
351
+ Args:
352
+ model (`nn.Module`):
353
+ The model to be adapted.
354
+ model_id (`str` or `os.PathLike`):
355
+ The name of the PEFT configuration to use. Can be either:
356
+ - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
357
+ Hub.
358
+ - A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
359
+ method (`./my_peft_config_directory/`).
360
+ adapter_name (`str`, *optional*, defaults to `"default"`):
361
+ The name of the adapter to be loaded. This is useful for loading multiple adapters.
362
+ is_trainable (`bool`, *optional*, defaults to `False`):
363
+ Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and used for
364
+ inference.
365
+ config ([`~peft.PeftConfig`], *optional*):
366
+ The configuration object to use instead of an automatically loaded configuration. This configuration
367
+ object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
368
+ loaded before calling `from_pretrained`.
369
+ kwargs: (`optional`):
370
+ Additional keyword arguments passed along to the specific PEFT configuration class.
371
+ """
372
+ # note: adapted from PeftModel.from_pretrained
373
+ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
374
+
375
+ # load the config
376
+ if config is None:
377
+ config = PEFT_TYPE_TO_CONFIG_MAPPING[
378
+ PeftConfig._get_peft_type(
379
+ model_id,
380
+ subfolder=kwargs.get("subfolder", None),
381
+ revision=kwargs.get("revision", None),
382
+ cache_dir=kwargs.get("cache_dir", None),
383
+ use_auth_token=kwargs.get("use_auth_token", None),
384
+ )
385
+ ].from_pretrained(model_id, **kwargs)
386
+ elif isinstance(config, PeftConfig):
387
+ config.inference_mode = not is_trainable
388
+ else:
389
+ raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
390
+
391
+ # note: this is different from PeftModel.from_pretrained
392
+ if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING:
393
+ raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.")
394
+
395
+ if (getattr(model, "hf_device_map", None) is not None) and len(
396
+ set(model.hf_device_map.values()).intersection({"cpu", "disk"})
397
+ ) > 0:
398
+ remove_hook_from_submodules(model)
399
+
400
+ if config.is_prompt_learning and is_trainable:
401
+ # note: should not be possible to reach, but just in case
402
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
403
+ else:
404
+ config.inference_mode = not is_trainable
405
+
406
+ # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel
407
+ model = cls(model, config, adapter_name)
408
+ model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
409
+ return model
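As a hedged end-to-end sketch of the class above, mirroring its docstring example; the base model name and adapter paths are placeholders.

from transformers import AutoModelForCausalLM
from peft import PeftMixedModel

base_model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder base model
peft_model = PeftMixedModel.from_pretrained(base_model, "path/to/lora_adapter", "adapter1").eval()
peft_model.load_adapter("path/to/loha_adapter", "adapter2")
peft_model.set_adapter(["adapter1", "adapter2"])  # both adapters are applied in the forward pass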
env-llmeval/lib/python3.10/site-packages/peft/peft_model.py ADDED
@@ -0,0 +1,1986 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import collections
18
+ import inspect
19
+ import os
20
+ import warnings
21
+ from contextlib import contextmanager
22
+ from copy import deepcopy
23
+ from typing import Any, Optional, Union
24
+
25
+ import packaging.version
26
+ import torch
27
+ import transformers
28
+ from accelerate import dispatch_model, infer_auto_device_map
29
+ from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
30
+ from accelerate.utils import get_balanced_memory
31
+ from huggingface_hub import ModelCard, ModelCardData, hf_hub_download
32
+ from safetensors.torch import save_file as safe_save_file
33
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
34
+ from transformers import PreTrainedModel
35
+ from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
36
+ from transformers.utils import PushToHubMixin
37
+
38
+ from . import __version__
39
+ from .config import PeftConfig
40
+ from .tuners import (
41
+ AdaLoraModel,
42
+ AdaptionPromptModel,
43
+ IA3Model,
44
+ LoHaModel,
45
+ LoKrModel,
46
+ LoraModel,
47
+ MultitaskPromptEmbedding,
48
+ OFTModel,
49
+ PolyModel,
50
+ PrefixEncoder,
51
+ PromptEmbedding,
52
+ PromptEncoder,
53
+ )
54
+ from .utils import (
55
+ SAFETENSORS_WEIGHTS_NAME,
56
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
57
+ WEIGHTS_NAME,
58
+ PeftType,
59
+ TaskType,
60
+ _get_batch_size,
61
+ _prepare_prompt_learning_config,
62
+ _set_adapter,
63
+ _set_trainable,
64
+ get_peft_model_state_dict,
65
+ id_tensor_storage,
66
+ infer_device,
67
+ load_peft_weights,
68
+ set_peft_model_state_dict,
69
+ shift_tokens_right,
70
+ )
71
+
72
+
73
+ PEFT_TYPE_TO_MODEL_MAPPING = {
74
+ PeftType.LORA: LoraModel,
75
+ PeftType.LOHA: LoHaModel,
76
+ PeftType.LOKR: LoKrModel,
77
+ PeftType.PROMPT_TUNING: PromptEmbedding,
78
+ PeftType.P_TUNING: PromptEncoder,
79
+ PeftType.PREFIX_TUNING: PrefixEncoder,
80
+ PeftType.ADALORA: AdaLoraModel,
81
+ PeftType.ADAPTION_PROMPT: AdaptionPromptModel,
82
+ PeftType.IA3: IA3Model,
83
+ PeftType.OFT: OFTModel,
84
+ PeftType.POLY: PolyModel,
85
+ }
86
+
87
+
88
+ class PeftModel(PushToHubMixin, torch.nn.Module):
89
+ """
90
+ Base model encompassing various Peft methods.
91
+
92
+ Args:
93
+ model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.
94
+ peft_config ([`PeftConfig`]): The configuration of the Peft model.
95
+ adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
96
+
97
+ **Attributes**:
98
+ - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft.
99
+ - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.
100
+ - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when
101
+ saving the model.
102
+ - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if
103
+ using [`PromptLearningConfig`].
104
+ - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if
105
+ using [`PromptLearningConfig`].
106
+ - **transformer_backbone_name** (`str`) -- The name of the transformer
107
+ backbone in the base model if using [`PromptLearningConfig`].
108
+ - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone
109
+ in the base model if using [`PromptLearningConfig`].
110
+ """
111
+
112
+ def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None:
113
+ super().__init__()
114
+ self.modules_to_save = None
115
+ self.active_adapter = adapter_name
116
+ self.peft_type = peft_config.peft_type
117
+ # These args are special PEFT arguments that users can pass. They need to be removed before passing them to
118
+ # forward.
119
+ self.special_peft_forward_args = {"adapter_names"}
120
+
121
+ self._is_prompt_learning = peft_config.is_prompt_learning
122
+ if self._is_prompt_learning:
123
+ self._peft_config = {adapter_name: peft_config}
124
+ self.base_model = model
125
+ self.add_adapter(adapter_name, peft_config)
126
+ else:
127
+ self._peft_config = None
128
+ cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]
129
+ self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
130
+ self.set_additional_trainable_modules(peft_config, adapter_name)
131
+
132
+ if getattr(model, "is_gradient_checkpointing", True):
133
+ model = self._prepare_model_for_gradient_checkpointing(model)
134
+
135
+ # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
136
+ # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
137
+ # behavior we disable that in this line.
138
+ if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
139
+ self.base_model.config.pretraining_tp = 1
140
+
141
+ @property
142
+ def peft_config(self) -> dict[str, PeftConfig]:
143
+ if self._is_prompt_learning:
144
+ return self._peft_config
145
+ return self.base_model.peft_config
146
+
147
+ @property
148
+ def active_adapters(self) -> list[str]:
149
+ try:
150
+ adapters = self.base_model.active_adapters
151
+ except AttributeError:
152
+ adapters = self.active_adapter
153
+ if isinstance(adapters, str):
154
+ adapters = [adapters]
155
+ return adapters
156
+
157
+ @peft_config.setter
158
+ def peft_config(self, value: dict[str, PeftConfig]):
159
+ if self._is_prompt_learning:
160
+ self._peft_config = value
161
+ else:
162
+ self.base_model.peft_config = value
163
+
164
+ def save_pretrained(
165
+ self,
166
+ save_directory: str,
167
+ safe_serialization: bool = True,
168
+ selected_adapters: Optional[list[str]] = None,
169
+ save_embedding_layers: Union[str, bool] = "auto",
170
+ is_main_process: bool = True,
171
+ **kwargs: Any,
172
+ ) -> None:
173
+ r"""
174
+ This function saves the adapter model and the adapter configuration files to a directory, so that it can be
175
+ reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`]
176
+ method.
177
+
178
+ Args:
179
+ save_directory (`str`):
180
+ Directory where the adapter model and configuration files will be saved (will be created if it does not
181
+ exist).
182
+ safe_serialization (`bool`, *optional*):
183
+ Whether to save the adapter files in safetensors format, defaults to `True`.
184
+ selected_adapters (`List[str]`, *optional*):
185
+ A list of adapters to be saved. If `None`, will default to all adapters.
186
+ save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`):
187
+ If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common
188
+ embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available.
189
+ and automatically sets the boolean flag. This only works for 🤗 transformers models.
190
+ is_main_process (`bool`, *optional*):
191
+ Whether the process calling this is the main process or not. Will default to `True`. Will not save the
192
+ checkpoint if not on the main process, which is important for multi device setups (e.g. DDP).
193
+ kwargs (additional keyword arguments, *optional*):
194
+ Additional keyword arguments passed along to the `push_to_hub` method.
195
+ """
196
+ if os.path.isfile(save_directory):
197
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
198
+
199
+ if selected_adapters is None:
200
+ selected_adapters = list(self.peft_config.keys())
201
+ else:
202
+ if any(
203
+ selected_adapter_name not in list(self.peft_config.keys())
204
+ for selected_adapter_name in selected_adapters
205
+ ):
206
+ raise ValueError(
207
+ f"You passed an invalid `selected_adapters` arguments, current supported adapter names are"
208
+ f" {list(self.peft_config.keys())} - got {selected_adapters}."
209
+ )
210
+
211
+ if is_main_process:
212
+ os.makedirs(save_directory, exist_ok=True)
213
+ self.create_or_update_model_card(save_directory)
214
+
215
+ for adapter_name in selected_adapters:
216
+ peft_config = self.peft_config[adapter_name]
217
+ # save only the trainable weights
218
+ output_state_dict = get_peft_model_state_dict(
219
+ self,
220
+ state_dict=kwargs.get("state_dict", None),
221
+ adapter_name=adapter_name,
222
+ save_embedding_layers=save_embedding_layers,
223
+ )
224
+ output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory
225
+ os.makedirs(output_dir, exist_ok=True)
226
+
227
+ if is_main_process and safe_serialization:
228
+ # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134
229
+ # Safetensors does not allow tensor aliasing.
230
+ # We're going to remove aliases before saving
231
+ ptrs = collections.defaultdict(list)
232
+ for name, tensor in output_state_dict.items():
233
+ # Sometimes in the state_dict we have non-tensor objects.
234
+ # e.g. in bitsandbytes we have some `str` objects in the state_dict
235
+ if isinstance(tensor, torch.Tensor):
236
+ ptrs[id_tensor_storage(tensor)].append(name)
237
+ else:
238
+ # In the non-tensor case, fall back to the pointer of the object itself
239
+ ptrs[id(tensor)].append(name)
240
+
241
+ # These are all the pointers of shared tensors.
242
+ shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
243
+
244
+ for _, names in shared_ptrs.items():
245
+ # Here we just clone the shared tensors to avoid tensor aliasing which is
246
+ # not supported in safetensors.
247
+ for shared_tensor_name in names[1:]:
248
+ output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone()
249
+
250
+ safe_save_file(
251
+ output_state_dict,
252
+ os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME),
253
+ metadata={"format": "pt"},
254
+ )
255
+ elif is_main_process:
256
+ torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
257
+
258
+ # save the config and change the inference mode to `True`
259
+ if peft_config.base_model_name_or_path is None:
260
+ peft_config.base_model_name_or_path = (
261
+ self.base_model.__dict__.get("name_or_path", None)
262
+ if peft_config.is_prompt_learning
263
+ else self.base_model.model.__dict__.get("name_or_path", None)
264
+ )
265
+ inference_mode = peft_config.inference_mode
266
+ peft_config.inference_mode = True
267
+
268
+ if peft_config.task_type is None:
269
+ # deal with auto mapping
270
+ base_model_class = self._get_base_model_class(
271
+ is_prompt_tuning=peft_config.is_prompt_learning,
272
+ )
273
+ parent_library = base_model_class.__module__
274
+
275
+ auto_mapping_dict = {
276
+ "base_model_class": base_model_class.__name__,
277
+ "parent_library": parent_library,
278
+ }
279
+ else:
280
+ auto_mapping_dict = None
281
+
282
+ if is_main_process:
283
+ peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict)
284
+ peft_config.inference_mode = inference_mode
285
+
286
+ @classmethod
287
+ def from_pretrained(
288
+ cls,
289
+ model: torch.nn.Module,
290
+ model_id: Union[str, os.PathLike],
291
+ adapter_name: str = "default",
292
+ is_trainable: bool = False,
293
+ config: Optional[PeftConfig] = None,
294
+ **kwargs: Any,
295
+ ) -> PeftModel:
296
+ r"""
297
+ Instantiate a PEFT model from a pretrained model and loaded PEFT weights.
298
+
299
+ Note that the passed `model` may be modified inplace.
300
+
301
+ Args:
302
+ model ([`torch.nn.Module`]):
303
+ The model to be adapted. For 🤗 Transformers models, the model should be initialized with the
304
+ [`~transformers.PreTrainedModel.from_pretrained`].
305
+ model_id (`str` or `os.PathLike`):
306
+ The name of the PEFT configuration to use. Can be either:
307
+ - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
308
+ Hub.
309
+ - A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
310
+ method (`./my_peft_config_directory/`).
311
+ adapter_name (`str`, *optional*, defaults to `"default"`):
312
+ The name of the adapter to be loaded. This is useful for loading multiple adapters.
313
+ is_trainable (`bool`, *optional*, defaults to `False`):
314
+ Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
315
+ used for inference.
316
+ config ([`~peft.PeftConfig`], *optional*):
317
+ The configuration object to use instead of an automatically loaded configuration. This configuration
318
+ object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
319
+ loaded before calling `from_pretrained`.
320
+ kwargs: (`optional`):
321
+ Additional keyword arguments passed along to the specific PEFT configuration class.
322
+ """
323
+ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
324
+
325
+ # load the config
326
+ if config is None:
327
+ config = PEFT_TYPE_TO_CONFIG_MAPPING[
328
+ PeftConfig._get_peft_type(
329
+ model_id,
330
+ subfolder=kwargs.get("subfolder", None),
331
+ revision=kwargs.get("revision", None),
332
+ cache_dir=kwargs.get("cache_dir", None),
333
+ use_auth_token=kwargs.get("use_auth_token", None),
334
+ token=kwargs.get("token", None),
335
+ )
336
+ ].from_pretrained(model_id, **kwargs)
337
+ elif isinstance(config, PeftConfig):
338
+ config.inference_mode = not is_trainable
339
+ else:
340
+ raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
341
+
342
+ if (getattr(model, "hf_device_map", None) is not None) and len(
343
+ set(model.hf_device_map.values()).intersection({"cpu", "disk"})
344
+ ) > 0:
345
+ remove_hook_from_submodules(model)
346
+
347
+ if config.is_prompt_learning and is_trainable:
348
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
349
+ else:
350
+ config.inference_mode = not is_trainable
351
+
352
+ if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():
353
+ model = cls(model, config, adapter_name)
354
+ else:
355
+ model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)
356
+ model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
357
+ return model
358
+
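A minimal loading sketch for the classmethod above; the base model and the adapter id are placeholders and assume the adapter was previously saved with `save_pretrained`.

from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder base model
peft_model = PeftModel.from_pretrained(base_model, "my-adapter", adapter_name="default", is_trainable=False)
# With is_trainable=False the adapter weights stay frozen and the model is put in eval mode.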
359
+ def _setup_prompt_encoder(self, adapter_name: str):
360
+ config = self.peft_config[adapter_name]
361
+ if not hasattr(self, "prompt_encoder"):
362
+ self.prompt_encoder = torch.nn.ModuleDict({})
363
+ self.prompt_tokens = {}
364
+ transformer_backbone = None
365
+ for name, module in self.base_model.named_children():
366
+ for param in module.parameters():
367
+ param.requires_grad = False
368
+ if isinstance(module, PreTrainedModel):
369
+ # Make sure to freeze the Transformers model
370
+ if transformer_backbone is None:
371
+ transformer_backbone = module
372
+ self.transformer_backbone_name = name
373
+ if transformer_backbone is None:
374
+ transformer_backbone = self.base_model
375
+
376
+ if config.num_transformer_submodules is None:
377
+ config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
378
+
379
+ for named_param, value in list(transformer_backbone.named_parameters()):
380
+ # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0]
381
+ # the actual unsharded shape is stored in "ds_shape" attribute
382
+ # special handling is needed in case the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig
383
+ # has been called before
384
+ # For reference refer to issue: https://github.com/huggingface/peft/issues/996
385
+ deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None)
386
+
387
+ if value.shape[0] == self.base_model.config.vocab_size or (
388
+ deepspeed_distributed_tensor_shape is not None
389
+ and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size
390
+ ):
391
+ self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", ""))
392
+ break
393
+
394
+ if config.peft_type == PeftType.PROMPT_TUNING:
395
+ prompt_encoder = PromptEmbedding(config, self.word_embeddings)
396
+ elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
397
+ prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings)
398
+ elif config.peft_type == PeftType.P_TUNING:
399
+ prompt_encoder = PromptEncoder(config)
400
+ elif config.peft_type == PeftType.PREFIX_TUNING:
401
+ prompt_encoder = PrefixEncoder(config)
402
+ else:
403
+ raise ValueError("Not supported")
404
+
405
+ prompt_encoder = prompt_encoder.to(self.device)
406
+ self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))
407
+ self.prompt_tokens[adapter_name] = torch.arange(
408
+ config.num_virtual_tokens * config.num_transformer_submodules
409
+ ).long()
410
+
411
+ def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel):
412
+ r"""
413
+ Prepares the model for gradient checkpointing if necessary
414
+ """
415
+ if not (
416
+ getattr(model, "is_loaded_in_8bit", False)
417
+ or getattr(model, "is_loaded_in_4bit", False)
418
+ or getattr(model, "is_quantized", False)
419
+ ):
420
+ if hasattr(model, "enable_input_require_grads"):
421
+ model.enable_input_require_grads()
422
+ elif hasattr(model, "get_input_embeddings"):
423
+
424
+ def make_inputs_require_grad(module, input, output):
425
+ output.requires_grad_(True)
426
+
427
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
428
+ return model
429
+
430
+ def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor:
431
+ """
432
+ Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning
433
+ method.
434
+ """
435
+ prompt_encoder = self.prompt_encoder[adapter_name]
436
+ prompt_tokens = (
437
+ self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device)
438
+ )
439
+ if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:
440
+ prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]
441
+
442
+ if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING:
443
+ prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens)
444
+ else:
445
+ prompt_embeddings = prompt_encoder(prompt_tokens)
446
+
447
+ return prompt_embeddings[0].detach().cpu()
448
+
449
+ def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor:
450
+ """
451
+ Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method.
452
+ """
453
+ peft_config = self.active_peft_config
454
+ prompt_encoder = self.prompt_encoder[self.active_adapter]
455
+ prompt_tokens = (
456
+ self.prompt_tokens[self.active_adapter]
457
+ .unsqueeze(0)
458
+ .expand(batch_size, -1)
459
+ .to(prompt_encoder.embedding.weight.device)
460
+ )
461
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
462
+ prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]
463
+ if peft_config.inference_mode:
464
+ past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
465
+ else:
466
+ past_key_values = prompt_encoder(prompt_tokens)
467
+ if self.base_model_torch_dtype is not None:
468
+ past_key_values = past_key_values.to(self.base_model_torch_dtype)
469
+ past_key_values = past_key_values.view(
470
+ batch_size,
471
+ peft_config.num_virtual_tokens,
472
+ peft_config.num_layers * 2,
473
+ peft_config.num_attention_heads,
474
+ peft_config.token_dim // peft_config.num_attention_heads,
475
+ )
476
+ if peft_config.num_transformer_submodules == 2:
477
+ past_key_values = torch.cat([past_key_values, past_key_values], dim=2)
478
+ past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(
479
+ peft_config.num_transformer_submodules * 2
480
+ )
481
+ if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:
482
+ post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]
483
+ past_key_values = post_process_fn(past_key_values)
484
+ return past_key_values
485
+ else:
486
+ if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
487
+ prompts = prompt_encoder(prompt_tokens, task_ids)
488
+ else:
489
+ if peft_config.inference_mode:
490
+ prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
491
+ else:
492
+ prompts = prompt_encoder(prompt_tokens)
493
+ return prompts
494
+
495
+ def get_nb_trainable_parameters(self) -> tuple[int, int]:
496
+ r"""
497
+ Returns the number of trainable parameters and the number of all parameters in the model.
498
+ """
499
+ trainable_params = 0
500
+ all_param = 0
501
+ for _, param in self.named_parameters():
502
+ num_params = param.numel()
503
+ # if using DS Zero 3 and the weights are initialized empty
504
+ if num_params == 0 and hasattr(param, "ds_numel"):
505
+ num_params = param.ds_numel
506
+
507
+ # Due to the design of 4bit linear layers from bitsandbytes
508
+ # one needs to multiply the number of parameters by 2 to get
509
+ # the correct number of parameters
510
+ if param.__class__.__name__ == "Params4bit":
511
+ num_bytes = param.quant_storage.itemsize if hasattr(param, "quant_storage") else 1
512
+ num_params = num_params * 2 * num_bytes
513
+
514
+ all_param += num_params
515
+ if param.requires_grad:
516
+ trainable_params += num_params
517
+
518
+ return trainable_params, all_param
519
+
520
+ def print_trainable_parameters(self) -> None:
521
+ """
522
+ Prints the number of trainable parameters in the model.
523
+
524
+ Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from
525
+ num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
526
+ (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model.
527
+ For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for
528
+ prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number
529
+ of trainable parameters of the backbone transformer model which can be different.
530
+ """
531
+ trainable_params, all_param = self.get_nb_trainable_parameters()
532
+
533
+ print(
534
+ f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}"
535
+ )
536
+
537
+ def __getattr__(self, name: str):
538
+ """Forward missing attributes to the wrapped module."""
539
+ try:
540
+ return super().__getattr__(name) # defer to nn.Module's logic
541
+ except AttributeError:
542
+ return getattr(self.base_model, name)
543
+
544
+ @contextmanager
545
+ def _enable_peft_forward_hooks(self, *args, **kwargs):
546
+ # If the base model has a method called _enable_peft_forward_hooks, it is invoked as a context. Otherwise, this
547
+ # runs without any changes
548
+ if hasattr(self.base_model, "_enable_peft_forward_hooks"):
549
+ with self.base_model._enable_peft_forward_hooks(*args, **kwargs):
550
+ yield
551
+ return
552
+ else:
553
+ # nothing to enable
554
+ yield
555
+ return
556
+
557
+ def forward(self, *args: Any, **kwargs: Any):
558
+ """
559
+ Forward pass of the model.
560
+ """
561
+ with self._enable_peft_forward_hooks(*args, **kwargs):
562
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
563
+ return self.get_base_model()(*args, **kwargs)
564
+
565
+ def generate(self, *args, **kwargs):
566
+ with self._enable_peft_forward_hooks(*args, **kwargs):
567
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
568
+ return self.get_base_model().generate(*args, **kwargs)
569
+
570
+ def _get_base_model_class(self, is_prompt_tuning=False):
571
+ """
572
+ Returns the base model class.
573
+ """
574
+ if not is_prompt_tuning:
575
+ return self.base_model.model.__class__
576
+ return self.base_model.__class__
577
+
578
+ @contextmanager
579
+ def disable_adapter(self):
580
+ """
581
+ Context manager that disables the adapter module. Use this to run inference on the base model.
582
+
583
+ Example:
584
+
585
+ ```py
586
+ >>> with model.disable_adapter():
587
+ ... model(inputs)
588
+ ```
589
+ """
590
+ try:
591
+ if self.peft_config[self.active_adapter].is_prompt_learning:
592
+ # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and
593
+ # letting the underlying methods deal with it, same as how LoRA does it.
594
+ old_forward = self.forward
595
+ self.forward = self.base_model.forward
596
+ old_prepare_inputs_for_generation = self.prepare_inputs_for_generation
597
+ self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
598
+ else:
599
+ self.base_model.disable_adapter_layers()
600
+ yield
601
+ finally:
602
+ if self.peft_config[self.active_adapter].is_prompt_learning:
603
+ self.forward = old_forward
604
+ self.prepare_inputs_for_generation = old_prepare_inputs_for_generation
605
+ else:
606
+ self.base_model.enable_adapter_layers()
607
+
608
+ def get_base_model(self) -> torch.nn.Module:
609
+ """
610
+ Returns the base model.
611
+ """
612
+ return (
613
+ self.base_model
614
+ if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY)
615
+ else self.base_model.model
616
+ )
617
+
618
+ def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
619
+ """
620
+ Add an adapter to the model based on the passed configuration.
621
+
622
+ This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
623
+
624
+ The name for the new adapter should be unique.
625
+
626
+ The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
627
+ adapter.
628
+
629
+ Args:
630
+ adapter_name (`str`):
631
+ The name of the adapter to be added.
632
+ peft_config ([`PeftConfig`]):
633
+ The configuration of the adapter to be added.
634
+ """
635
+ if peft_config.peft_type != self.peft_type:
636
+ raise ValueError(
637
+ f"Cannot combine adapters with different peft types. "
638
+ f"Found {self.peft_type} and {peft_config.peft_type}."
639
+ )
640
+
641
+ try:
642
+ if peft_config.is_prompt_learning:
643
+ self.peft_config[adapter_name] = peft_config
644
+ if hasattr(self.config, "to_dict"):
645
+ dict_config = self.config.to_dict()
646
+ else:
647
+ dict_config = self.config
648
+
649
+ peft_config = _prepare_prompt_learning_config(peft_config, dict_config)
650
+ self._setup_prompt_encoder(adapter_name)
651
+ elif peft_config.is_adaption_prompt:
652
+ self.base_model.add_adapter(adapter_name, peft_config)
653
+ else:
654
+ self.peft_config[adapter_name] = peft_config
655
+ self.base_model.inject_adapter(self.base_model.model, adapter_name)
656
+ except Exception: # something went wrong, roll back
657
+ if adapter_name in self.peft_config:
658
+ del self.peft_config[adapter_name]
659
+ raise
660
+
661
+ self.set_additional_trainable_modules(peft_config, adapter_name)
662
+
663
+ def set_additional_trainable_modules(self, peft_config, adapter_name):
664
+ if getattr(peft_config, "modules_to_save", None) is not None:
665
+ if self.modules_to_save is None:
666
+ self.modules_to_save = set(peft_config.modules_to_save)
667
+ else:
668
+ self.modules_to_save.update(peft_config.modules_to_save)
669
+ _set_trainable(self, adapter_name)
670
+
671
+ @classmethod
672
+ def _split_kwargs(cls, kwargs: dict[str, Any]):
673
+ _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",)
674
+ hf_hub_download_kwargs = {}
675
+ other_kwargs = {}
676
+
677
+ for key, value in kwargs.items():
678
+ if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature:
679
+ hf_hub_download_kwargs[key] = value
680
+ else:
681
+ other_kwargs[key] = value
682
+
683
+ return hf_hub_download_kwargs, other_kwargs
684
+
685
+ def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any):
686
+ """
687
+ Load a trained adapter into the model.
688
+
689
+ The name for the new adapter should be unique.
690
+
691
+ The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
692
+ adapter.
693
+
694
+ Args:
695
+ adapter_name (`str`):
696
+ The name of the adapter to be added.
697
+ peft_config ([`PeftConfig`]):
698
+ The configuration of the adapter to be added.
699
+ is_trainable (`bool`, *optional*, defaults to `False`):
700
+ Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
701
+ used for inference.
702
+ kwargs: (`optional`):
703
+ Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub.
704
+ """
705
+ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
706
+
707
+ hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs)
708
+ torch_device = infer_device()
709
+
710
+ if adapter_name not in self.peft_config:
711
+ # load the config
712
+ peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[
713
+ PeftConfig._get_peft_type(
714
+ model_id,
715
+ **hf_hub_download_kwargs,
716
+ )
717
+ ].from_pretrained(
718
+ model_id,
719
+ **hf_hub_download_kwargs,
720
+ )
721
+ if peft_config.is_prompt_learning and is_trainable:
722
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
723
+ else:
724
+ peft_config.inference_mode = not is_trainable
725
+ self.add_adapter(adapter_name, peft_config)
726
+
727
+ adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs)
728
+
729
+ # load the weights into the model
730
+ load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name)
731
+ if (
732
+ (getattr(self, "hf_device_map", None) is not None)
733
+ and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
734
+ and len(self.peft_config) == 1
735
+ ):
736
+ device_map = kwargs.get("device_map", "auto")
737
+ max_memory = kwargs.get("max_memory", None)
738
+ offload_dir = kwargs.get("offload_folder", None)
739
+ offload_index = kwargs.get("offload_index", None)
740
+
741
+ dispatch_model_kwargs = {}
742
+ # Safety checker for previous `accelerate` versions
743
+ # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
744
+ if "offload_index" in inspect.signature(dispatch_model).parameters:
745
+ dispatch_model_kwargs["offload_index"] = offload_index
746
+
747
+ no_split_module_classes = self._no_split_modules
748
+
749
+ if device_map != "sequential":
750
+ max_memory = get_balanced_memory(
751
+ self,
752
+ max_memory=max_memory,
753
+ no_split_module_classes=no_split_module_classes,
754
+ low_zero=(device_map == "balanced_low_0"),
755
+ )
756
+ if isinstance(device_map, str):
757
+ device_map = infer_auto_device_map(
758
+ self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
759
+ )
760
+ dispatch_model(
761
+ self,
762
+ device_map=device_map,
763
+ offload_dir=offload_dir,
764
+ **dispatch_model_kwargs,
765
+ )
766
+ hook = AlignDevicesHook(io_same_device=True)
767
+ if self.peft_config[adapter_name].is_prompt_learning:
768
+ remove_hook_from_submodules(self.prompt_encoder)
769
+ add_hook_to_module(self.get_base_model(), hook)
770
+
771
+ # Set model in evaluation mode to deactivate Dropout modules by default
772
+ if not is_trainable:
773
+ self.eval()
774
+ return load_result
775
+
776
+ def set_adapter(self, adapter_name: str) -> None:
777
+ """
778
+ Sets the active adapter.
779
+
780
+ Only one adapter can be active at a time.
781
+
782
+ Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is
783
+ not desired, use the following code.
784
+
785
+ ```py
786
+ >>> for name, param in model_peft.named_parameters():
787
+ ... if ...: # some check on name (ex. if 'lora' in name)
788
+ ... param.requires_grad = False
789
+ ```
790
+
791
+ Args:
792
+ adapter_name (`str`):
793
+ The name of the adapter to be set as active. The adapter must be loaded first.
794
+ """
795
+ if adapter_name not in self.peft_config:
796
+ raise ValueError(f"Adapter {adapter_name} not found.")
797
+ self.active_adapter = adapter_name
798
+ if not self.peft_config[adapter_name].is_prompt_learning:
799
+ self.base_model.set_adapter(adapter_name)
800
+ _set_adapter(self, adapter_name)
801
+
802
+ @property
803
+ def base_model_torch_dtype(self):
804
+ return getattr(self.base_model, "dtype", None)
805
+
806
+ @property
807
+ def active_peft_config(self):
808
+ return self.peft_config[self.active_adapter]
809
+
810
+ def create_or_update_model_card(self, output_dir: str):
811
+ """
812
+ Updates or creates a model card to include information about peft:
813
+ 1. Adds `peft` library tag
814
+ 2. Adds peft version
815
+ 3. Adds base model info
816
+ 4. Adds quantization information if it was used
817
+ """
818
+
819
+ filename = os.path.join(output_dir, "README.md")
820
+
821
+ card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData())
822
+
823
+ card.data["library_name"] = "peft"
824
+
825
+ model_config = getattr(self, "config", None)
826
+ if hasattr(model_config, "to_dict"):
827
+ model_config = model_config.to_dict()
828
+ if model_config is not None and "_name_or_path" in model_config:
829
+ card.data["base_model"] = model_config["_name_or_path"]
830
+
831
+ lines = card.text.splitlines()
832
+
833
+ quantization_config = None
834
+ if hasattr(model_config, "quantization_config"):
835
+ quantization_config = self.config.quantization_config.to_dict()
836
+ training_config_text = ""
837
+ quantization_prefix = "The following `bitsandbytes` quantization config was used during training:"
838
+ # Adds quantization information if it was used
839
+ if quantization_config is not None:
840
+ training_config_text += f"\n{quantization_prefix}\n"
841
+ training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()])
842
+ training_config_text += "\n"
843
+
844
+ training_procedure_heading = "## Training procedure"
845
+ if quantization_prefix not in lines and bool(training_config_text):
846
+ if training_procedure_heading in lines:
847
+ lines.insert(lines.index(training_procedure_heading) + 2, training_config_text)
848
+ else:
849
+ lines.append(f"{training_procedure_heading}\n{training_config_text}")
850
+
851
+ # Adds peft version
852
+ framework_block_heading = "### Framework versions"
853
+ if f"- PEFT {__version__}" not in lines:
854
+ if framework_block_heading in lines:
855
+ lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}")
856
+ else:
857
+ lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}")
858
+
859
+ card.text = "\n".join(lines)
860
+ card.save(filename)
861
+
862
+
863
+ class PeftModelForSequenceClassification(PeftModel):
864
+ """
865
+ Peft model for sequence classification tasks.
866
+
867
+ Args:
868
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
869
+ peft_config ([`PeftConfig`]): Peft config.
870
+
871
+ **Attributes**:
872
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
873
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
874
+
875
+ Example:
876
+
877
+ ```py
878
+ >>> from transformers import AutoModelForSequenceClassification
879
+ >>> from peft import PeftModelForSequenceClassification, get_peft_config
880
+
881
+ >>> config = {
882
+ ... "peft_type": "PREFIX_TUNING",
883
+ ... "task_type": "SEQ_CLS",
884
+ ... "inference_mode": False,
885
+ ... "num_virtual_tokens": 20,
886
+ ... "token_dim": 768,
887
+ ... "num_transformer_submodules": 1,
888
+ ... "num_attention_heads": 12,
889
+ ... "num_layers": 12,
890
+ ... "encoder_hidden_size": 768,
891
+ ... "prefix_projection": False,
892
+ ... "postprocess_past_key_value_function": None,
893
+ ... }
894
+
895
+ >>> peft_config = get_peft_config(config)
896
+ >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")
897
+ >>> peft_model = PeftModelForSequenceClassification(model, peft_config)
898
+ >>> peft_model.print_trainable_parameters()
899
+ trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
900
+ ```
901
+ """
902
+
903
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
904
+ super().__init__(model, peft_config, adapter_name)
905
+ if self.modules_to_save is None:
906
+ self.modules_to_save = {"classifier", "score"}
907
+ else:
908
+ self.modules_to_save.update({"classifier", "score"})
909
+
910
+ for name, _ in self.base_model.named_children():
911
+ if any(module_name in name for module_name in self.modules_to_save):
912
+ self.cls_layer_name = name
913
+ break
914
+
915
+ # to make sure classifier layer is trainable
916
+ _set_trainable(self, adapter_name)
917
+
918
+ def forward(
919
+ self,
920
+ input_ids=None,
921
+ attention_mask=None,
922
+ inputs_embeds=None,
923
+ labels=None,
924
+ output_attentions=None,
925
+ output_hidden_states=None,
926
+ return_dict=None,
927
+ task_ids=None,
928
+ **kwargs,
929
+ ):
930
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
931
+ peft_config = self.active_peft_config
932
+ if not peft_config.is_prompt_learning:
933
+ with self._enable_peft_forward_hooks(**kwargs):
934
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
935
+ if peft_config.peft_type == PeftType.POLY:
936
+ kwargs["task_ids"] = task_ids
937
+ return self.base_model(
938
+ input_ids=input_ids,
939
+ attention_mask=attention_mask,
940
+ inputs_embeds=inputs_embeds,
941
+ labels=labels,
942
+ output_attentions=output_attentions,
943
+ output_hidden_states=output_hidden_states,
944
+ return_dict=return_dict,
945
+ **kwargs,
946
+ )
947
+
948
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
949
+ if attention_mask is not None:
950
+ # concat prompt attention mask
951
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
952
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
953
+ if kwargs.get("position_ids", None) is not None:
954
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
955
+ kwargs["position_ids"] = None
956
+ kwargs.update(
957
+ {
958
+ "attention_mask": attention_mask,
959
+ "labels": labels,
960
+ "output_attentions": output_attentions,
961
+ "output_hidden_states": output_hidden_states,
962
+ "return_dict": return_dict,
963
+ }
964
+ )
965
+
966
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
967
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
968
+ else:
969
+ if kwargs.get("token_type_ids", None) is not None:
970
+ kwargs["token_type_ids"] = torch.cat(
971
+ (
972
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
973
+ kwargs["token_type_ids"],
974
+ ),
975
+ dim=1,
976
+ ).long()
977
+ if inputs_embeds is None:
978
+ inputs_embeds = self.word_embeddings(input_ids)
979
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
980
+ prompts = prompts.to(inputs_embeds.dtype)
981
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
982
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
983
+
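A shape-only sketch of the prompt-learning branch above, assuming toy sizes for the batch, sequence, hidden dimension, and number of virtual tokens:

```py
import torch

batch_size, seq_len, hidden = 2, 5, 8
num_virtual_tokens = 3  # example value for peft_config.num_virtual_tokens

inputs_embeds = torch.randn(batch_size, seq_len, hidden)
attention_mask = torch.ones(batch_size, seq_len)
prompts = torch.randn(batch_size, num_virtual_tokens, hidden)  # stands in for get_prompt(...)

# prefix the attention mask and the embeddings, exactly as the forward above does
prefix_attention_mask = torch.ones(batch_size, num_virtual_tokens)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)

print(attention_mask.shape)  # torch.Size([2, 8])
print(inputs_embeds.shape)   # torch.Size([2, 8, 8])
```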
984
+ def _prefix_tuning_forward(
985
+ self,
986
+ input_ids=None,
987
+ attention_mask=None,
988
+ inputs_embeds=None,
989
+ labels=None,
990
+ output_attentions=None,
991
+ output_hidden_states=None,
992
+ return_dict=None,
993
+ **kwargs,
994
+ ):
995
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
996
+ past_key_values = self.get_prompt(batch_size)
997
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
998
+ kwargs.update(
999
+ {
1000
+ "input_ids": input_ids,
1001
+ "attention_mask": attention_mask,
1002
+ "inputs_embeds": inputs_embeds,
1003
+ "output_attentions": output_attentions,
1004
+ "output_hidden_states": output_hidden_states,
1005
+ "return_dict": return_dict,
1006
+ "past_key_values": past_key_values,
1007
+ }
1008
+ )
1009
+ if "past_key_values" in fwd_params:
1010
+ return self.base_model(labels=labels, **kwargs)
1011
+ else:
1012
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1013
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1014
+ if "past_key_values" not in fwd_params:
1015
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1016
+ outputs = transformer_backbone_name(**kwargs)
1017
+ pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]
1018
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1019
+ pooled_output = self.base_model.dropout(pooled_output)
1020
+ logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)
1021
+
1022
+ loss = None
1023
+ if labels is not None:
1024
+ if self.config.problem_type is None:
1025
+ if self.base_model.num_labels == 1:
1026
+ self.config.problem_type = "regression"
1027
+ elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1028
+ self.config.problem_type = "single_label_classification"
1029
+ else:
1030
+ self.config.problem_type = "multi_label_classification"
1031
+
1032
+ if self.config.problem_type == "regression":
1033
+ loss_fct = MSELoss()
1034
+ if self.base_model.num_labels == 1:
1035
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1036
+ else:
1037
+ loss = loss_fct(logits, labels)
1038
+ elif self.config.problem_type == "single_label_classification":
1039
+ loss_fct = CrossEntropyLoss()
1040
+ loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))
1041
+ elif self.config.problem_type == "multi_label_classification":
1042
+ loss_fct = BCEWithLogitsLoss()
1043
+ loss = loss_fct(logits, labels)
1044
+ if not return_dict:
1045
+ output = (logits,) + outputs[2:]
1046
+ return ((loss,) + output) if loss is not None else output
1047
+
1048
+ return SequenceClassifierOutput(
1049
+ loss=loss,
1050
+ logits=logits,
1051
+ hidden_states=outputs.hidden_states,
1052
+ attentions=outputs.attentions,
1053
+ )
1054
+
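The loss selection above follows the usual convention: regression when there is a single label, single-label classification when the labels are integer typed, multi-label classification otherwise. A self-contained sketch of that decision, with made-up tensors:

```py
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    # Mirrors the branching in _prefix_tuning_forward above.
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"

print(infer_problem_type(1, torch.tensor([0.3])))              # regression
print(infer_problem_type(3, torch.tensor([2])))                # single_label_classification
print(infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])))  # multi_label_classification
```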
1055
+
1056
+ class PeftModelForCausalLM(PeftModel):
1057
+ """
1058
+ Peft model for causal language modeling.
1059
+
1060
+ Args:
1061
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1062
+ peft_config ([`PeftConfig`]): Peft config.
1063
+
1064
+
1065
+ Example:
1066
+
1067
+ ```py
1068
+ >>> from transformers import AutoModelForCausalLM
1069
+ >>> from peft import PeftModelForCausalLM, get_peft_config
1070
+
1071
+ >>> config = {
1072
+ ... "peft_type": "PREFIX_TUNING",
1073
+ ... "task_type": "CAUSAL_LM",
1074
+ ... "inference_mode": False,
1075
+ ... "num_virtual_tokens": 20,
1076
+ ... "token_dim": 1280,
1077
+ ... "num_transformer_submodules": 1,
1078
+ ... "num_attention_heads": 20,
1079
+ ... "num_layers": 36,
1080
+ ... "encoder_hidden_size": 1280,
1081
+ ... "prefix_projection": False,
1082
+ ... "postprocess_past_key_value_function": None,
1083
+ ... }
1084
+
1085
+ >>> peft_config = get_peft_config(config)
1086
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large")
1087
+ >>> peft_model = PeftModelForCausalLM(model, peft_config)
1088
+ >>> peft_model.print_trainable_parameters()
1089
+ trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544
1090
+ ```
1091
+ """
1092
+
1093
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
1094
+ super().__init__(model, peft_config, adapter_name)
1095
+ self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
1096
+
1097
+ def forward(
1098
+ self,
1099
+ input_ids=None,
1100
+ attention_mask=None,
1101
+ inputs_embeds=None,
1102
+ labels=None,
1103
+ output_attentions=None,
1104
+ output_hidden_states=None,
1105
+ return_dict=None,
1106
+ task_ids=None,
1107
+ **kwargs,
1108
+ ):
1109
+ peft_config = self.active_peft_config
1110
+ if not peft_config.is_prompt_learning:
1111
+ if self.base_model.config.model_type == "mpt":
1112
+ if inputs_embeds is not None:
1113
+ raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds")
1114
+ return self.base_model(
1115
+ input_ids=input_ids,
1116
+ attention_mask=attention_mask,
1117
+ labels=labels,
1118
+ output_attentions=output_attentions,
1119
+ output_hidden_states=output_hidden_states,
1120
+ return_dict=return_dict,
1121
+ **kwargs,
1122
+ )
1123
+
1124
+ if peft_config.peft_type == PeftType.POLY:
1125
+ kwargs["task_ids"] = task_ids
1126
+
1127
+ with self._enable_peft_forward_hooks(**kwargs):
1128
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1129
+ return self.base_model(
1130
+ input_ids=input_ids,
1131
+ attention_mask=attention_mask,
1132
+ inputs_embeds=inputs_embeds,
1133
+ labels=labels,
1134
+ output_attentions=output_attentions,
1135
+ output_hidden_states=output_hidden_states,
1136
+ return_dict=return_dict,
1137
+ **kwargs,
1138
+ )
1139
+
1140
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1141
+ if attention_mask is not None:
1142
+ # concat prompt attention mask
1143
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1144
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1145
+
1146
+ if kwargs.get("position_ids", None) is not None:
1147
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1148
+ kwargs["position_ids"] = None
1149
+ if kwargs.get("token_type_ids", None) is not None:
1150
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
1151
+ kwargs["token_type_ids"] = None
1152
+ kwargs.update(
1153
+ {
1154
+ "attention_mask": attention_mask,
1155
+ "labels": labels,
1156
+ "output_attentions": output_attentions,
1157
+ "output_hidden_states": output_hidden_states,
1158
+ "return_dict": return_dict,
1159
+ }
1160
+ )
1161
+
1162
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1163
+ past_key_values = self.get_prompt(batch_size)
1164
+ return self.base_model(
1165
+ input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs
1166
+ )
1167
+ else:
1168
+ if inputs_embeds is None:
1169
+ inputs_embeds = self.word_embeddings(input_ids)
1170
+ # concat prompt labels
1171
+ if labels is not None:
1172
+ prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
1173
+ kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
1174
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
1175
+ prompts = prompts.to(inputs_embeds.dtype)
1176
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1177
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1178
+
1179
+ def generate(self, *args, **kwargs):
1180
+ peft_config = self.active_peft_config
1181
+ self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
1182
+ if hasattr(self.base_model, "model"):
1183
+ self.base_model.model.generation_config = self.generation_config
1184
+ else:
1185
+ self.base_model.generation_config = self.generation_config
1186
+ try:
1187
+ if not peft_config.is_prompt_learning:
1188
+ with self._enable_peft_forward_hooks(*args, **kwargs):
1189
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1190
+ outputs = self.base_model.generate(*args, **kwargs)
1191
+ else:
1192
+ outputs = self.base_model.generate(**kwargs)
1193
+ except:
1194
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1195
+ raise
1196
+ else:
1197
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1198
+ return outputs
1199
+
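A hedged usage sketch for the generate path above; the checkpoint name, prompt, and prompt-tuning hyperparameters are illustrative choices, not requirements of the library:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModelForCausalLM, PromptTuningConfig, TaskType

# "gpt2" and the prompt text below are illustrative
tokenizer = AutoTokenizer.from_pretrained("gpt2")
base_model = AutoModelForCausalLM.from_pretrained("gpt2")

peft_config = PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=8)
peft_model = PeftModelForCausalLM(base_model, peft_config)

inputs = tokenizer("The capital of France is", return_tensors="pt")
# generate() temporarily swaps in PEFT's prepare_inputs_for_generation, as shown above
output_ids = peft_model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```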
1200
+ def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor] = None, **kwargs):
1201
+ peft_config = self.active_peft_config
1202
+ model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
1203
+
1204
+ # https://github.com/huggingface/transformers/pull/26681/ introduced new cache format
1205
+ # for some architectures, which requires a special fix for prompt tuning etc.
1206
+ # TODO: starting with transformers 4.38, all architectures should support caching.
1207
+ uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0")
1208
+ uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0")
1209
+ transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"]
1210
+ uses_cache = uses_transformers_4_38 or (
1211
+ uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs
1212
+ )
1213
+
1214
+ if peft_config.peft_type == PeftType.POLY:
1215
+ model_kwargs["task_ids"] = task_ids
1216
+ if peft_config.is_prompt_learning:
1217
+ if uses_cache and (model_kwargs["past_key_values"] is not None):
1218
+ # change in the logic of `prepare_inputs_for_generation` makes the below code necessary
1219
+ # In prompt learning methods, past key values are longer when compared to the `input_ids`.
1220
+ # As such, only consider the last input ids in the autoregressive generation phase.
1221
+ if model_kwargs["past_key_values"][0][0].shape[-2] >= model_kwargs["input_ids"].shape[1]:
1222
+ model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:]
1223
+
1224
+ if model_kwargs.get("attention_mask", None) is not None:
1225
+ size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens
1226
+ prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device)
1227
+ model_kwargs["attention_mask"] = torch.cat(
1228
+ (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1
1229
+ )
1230
+
1231
+ if model_kwargs.get("position_ids", None) is not None:
1232
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1233
+ model_kwargs["position_ids"] = None
1234
+
1235
+ if kwargs.get("token_type_ids", None) is not None:
1236
+ warnings.warn(
1237
+ "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
1238
+ )
1239
+ kwargs["token_type_ids"] = None
1240
+
1241
+ if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
1242
+ past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0])
1243
+ model_kwargs["past_key_values"] = past_key_values
1244
+ else:
1245
+ if model_kwargs["past_key_values"] is None:
1246
+ inputs_embeds = self.word_embeddings(model_kwargs["input_ids"])
1247
+ prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids)
1248
+ prompts = prompts.to(inputs_embeds.dtype)
1249
+ model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1)
1250
+ model_kwargs["input_ids"] = None
1251
+
1252
+ # For transformers>=4.38.0 - for some architectures such as Llama, `cache_position` is
1253
+ # passed in the forward pass to keep track of the position ids of the cache. We have to
1254
+ # pop that from `model_kwargs` as `cache_position` is properly created by the model, using the passed
1255
+ # `inputs_embeds`: https://github.com/huggingface/transformers/blob/593230f0a1150ea9c0477b9d859f25daf73c8c33/src/transformers/models/llama/modeling_llama.py#L956
1256
+ _ = model_kwargs.pop("cache_position", None)
1257
+
1258
+ return model_kwargs
1259
+
1260
+
1261
+ class PeftModelForSeq2SeqLM(PeftModel):
1262
+ """
1263
+ Peft model for sequence-to-sequence language modeling.
1264
+
1265
+ Args:
1266
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1267
+ peft_config ([`PeftConfig`]): Peft config.
1268
+
1269
+
1270
+ Example:
1271
+
1272
+ ```py
1273
+ >>> from transformers import AutoModelForSeq2SeqLM
1274
+ >>> from peft import PeftModelForSeq2SeqLM, get_peft_config
1275
+
1276
+ >>> config = {
1277
+ ... "peft_type": "LORA",
1278
+ ... "task_type": "SEQ_2_SEQ_LM",
1279
+ ... "inference_mode": False,
1280
+ ... "r": 8,
1281
+ ... "target_modules": ["q", "v"],
1282
+ ... "lora_alpha": 32,
1283
+ ... "lora_dropout": 0.1,
1284
+ ... "fan_in_fan_out": False,
1285
+ ... "enable_lora": None,
1286
+ ... "bias": "none",
1287
+ ... }
1288
+
1289
+ >>> peft_config = get_peft_config(config)
1290
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
1291
+ >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)
1292
+ >>> peft_model.print_trainable_parameters()
1293
+ trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566
1294
+ ```
1295
+ """
1296
+
1297
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
1298
+ super().__init__(model, peft_config, adapter_name)
1299
+ self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
1300
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation = (
1301
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation
1302
+ )
1303
+
1304
+ def forward(
1305
+ self,
1306
+ input_ids=None,
1307
+ attention_mask=None,
1308
+ inputs_embeds=None,
1309
+ decoder_input_ids=None,
1310
+ decoder_attention_mask=None,
1311
+ decoder_inputs_embeds=None,
1312
+ labels=None,
1313
+ output_attentions=None,
1314
+ output_hidden_states=None,
1315
+ return_dict=None,
1316
+ task_ids=None,
1317
+ **kwargs,
1318
+ ):
1319
+ peft_config = self.active_peft_config
1320
+ if not peft_config.is_prompt_learning:
1321
+ if peft_config.peft_type == PeftType.POLY:
1322
+ kwargs["task_ids"] = task_ids
1323
+
1324
+ with self._enable_peft_forward_hooks(**kwargs):
1325
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1326
+ return self.base_model(
1327
+ input_ids=input_ids,
1328
+ attention_mask=attention_mask,
1329
+ inputs_embeds=inputs_embeds,
1330
+ decoder_input_ids=decoder_input_ids,
1331
+ decoder_attention_mask=decoder_attention_mask,
1332
+ decoder_inputs_embeds=decoder_inputs_embeds,
1333
+ labels=labels,
1334
+ output_attentions=output_attentions,
1335
+ output_hidden_states=output_hidden_states,
1336
+ return_dict=return_dict,
1337
+ **kwargs,
1338
+ )
1339
+
1340
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1341
+ if decoder_attention_mask is not None:
1342
+ # concat prompt attention mask
1343
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1344
+ decoder_attention_mask.device
1345
+ )
1346
+ if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
1347
+ decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)
1348
+
1349
+ if kwargs.get("position_ids", None) is not None:
1350
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1351
+ kwargs["position_ids"] = None
1352
+ if kwargs.get("token_type_ids", None) is not None:
1353
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
1354
+ kwargs["token_type_ids"] = None
1355
+ kwargs.update(
1356
+ {
1357
+ "attention_mask": attention_mask,
1358
+ "decoder_attention_mask": decoder_attention_mask,
1359
+ "labels": labels,
1360
+ "output_attentions": output_attentions,
1361
+ "output_hidden_states": output_hidden_states,
1362
+ "return_dict": return_dict,
1363
+ }
1364
+ )
1365
+
1366
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1367
+ past_key_values = self.get_prompt(batch_size)
1368
+ return self.base_model(
1369
+ input_ids=input_ids,
1370
+ decoder_input_ids=decoder_input_ids,
1371
+ decoder_inputs_embeds=decoder_inputs_embeds,
1372
+ past_key_values=past_key_values,
1373
+ **kwargs,
1374
+ )
1375
+ elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
1376
+ if inputs_embeds is None:
1377
+ inputs_embeds = self.word_embeddings(input_ids)
1378
+
1379
+ if attention_mask is not None:
1380
+ # concat prompt attention mask
1381
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1382
+ attention_mask.device
1383
+ )
1384
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1385
+
1386
+ prompts = self.get_prompt(batch_size=batch_size)
1387
+ prompts = prompts.to(inputs_embeds.dtype)
1388
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1389
+
1390
+ return self.base_model(
1391
+ inputs_embeds=inputs_embeds,
1392
+ decoder_input_ids=decoder_input_ids,
1393
+ decoder_inputs_embeds=decoder_inputs_embeds,
1394
+ **kwargs,
1395
+ )
1396
+ else:
1397
+ if inputs_embeds is None:
1398
+ inputs_embeds = self.word_embeddings(input_ids)
1399
+ if decoder_inputs_embeds is None and decoder_input_ids is None:
1400
+ decoder_input_ids = shift_tokens_right(
1401
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1402
+ )
1403
+ decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)
1404
+
1405
+ if attention_mask is not None:
1406
+ # concat prompt attention mask
1407
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1408
+ attention_mask.device
1409
+ )
1410
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1411
+ # concat prompt labels
1412
+ if labels is not None:
1413
+ if peft_config.num_transformer_submodules == 1:
1414
+ kwargs["labels"] = labels
1415
+ elif peft_config.num_transformer_submodules == 2:
1416
+ prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
1417
+ kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
1418
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
1419
+ prompts = prompts.to(inputs_embeds.dtype)
1420
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1421
+ if peft_config.num_transformer_submodules == 1:
1422
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1423
+ elif peft_config.num_transformer_submodules == 2:
1424
+ decoder_inputs_embeds = torch.cat(
1425
+ (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1
1426
+ )
1427
+ return self.base_model(
1428
+ inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs
1429
+ )
1430
+
1431
+ def generate(self, **kwargs):
1432
+ peft_config = self.active_peft_config
1433
+ self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
1434
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1435
+ self._prepare_encoder_decoder_kwargs_for_generation
1436
+ )
1437
+ try:
1438
+ if not peft_config.is_prompt_learning:
1439
+ with self._enable_peft_forward_hooks(**kwargs):
1440
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1441
+ outputs = self.base_model.generate(**kwargs)
1442
+ else:
1443
+ if "input_ids" not in kwargs:
1444
+ raise ValueError("input_ids must be provided for Peft model generation")
1445
+ if kwargs.get("position_ids", None) is not None:
1446
+ warnings.warn(
1447
+ "Position ids are not supported for parameter efficient tuning. Ignoring position ids."
1448
+ )
1449
+ kwargs["position_ids"] = None
1450
+ if kwargs.get("token_type_ids", None) is not None:
1451
+ warnings.warn(
1452
+ "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
1453
+ )
1454
+ kwargs["token_type_ids"] = None
1455
+
1456
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1457
+ outputs = self.base_model.generate(**kwargs)
1458
+ elif peft_config.peft_type in [
1459
+ PeftType.PROMPT_TUNING,
1460
+ PeftType.P_TUNING,
1461
+ PeftType.MULTITASK_PROMPT_TUNING,
1462
+ ]:
1463
+ kwargs = deepcopy(kwargs)
1464
+
1465
+ if "encoder_outputs" in kwargs:
1466
+ del kwargs["encoder_outputs"]
1467
+ warnings.warn(
1468
+ "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it."
1469
+ )
1470
+
1471
+ input_ids = kwargs.pop("input_ids")
1472
+ inputs_embeds = self.word_embeddings(input_ids)
1473
+ batch_size = inputs_embeds.shape[0]
1474
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None))
1475
+ prompts = prompts.to(inputs_embeds.dtype)
1476
+
1477
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1478
+ kwargs["inputs_embeds"] = inputs_embeds
1479
+
1480
+ if "attention_mask" in kwargs:
1481
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1482
+ kwargs["attention_mask"].device
1483
+ )
1484
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1)
1485
+
1486
+ return self.base_model.generate(**kwargs)
1487
+ else:
1488
+ raise NotImplementedError
1489
+ except:
1490
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1491
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1492
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation
1493
+ )
1494
+ raise
1495
+ else:
1496
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1497
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1498
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation
1499
+ )
1500
+ return outputs
1501
+
1502
+ def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs):
1503
+ peft_config = self.active_peft_config
1504
+ model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
1505
+ if peft_config.peft_type == PeftType.POLY:
1506
+ model_kwargs["task_ids"] = task_ids
1507
+ if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
1508
+ batch_size = model_kwargs["decoder_input_ids"].shape[0]
1509
+ past_key_values = self.get_prompt(batch_size)
1510
+ model_kwargs["past_key_values"] = past_key_values
1511
+
1512
+ return model_kwargs
1513
+
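A similar hedged usage sketch for the seq2seq generate path above, again with an illustrative checkpoint and input text:

```py
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModelForSeq2SeqLM, PromptTuningConfig, TaskType

# "t5-small" and the input text are illustrative
tokenizer = AutoTokenizer.from_pretrained("t5-small")
base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

peft_config = PromptTuningConfig(task_type=TaskType.SEQ_2_SEQ_LM, num_virtual_tokens=8)
peft_model = PeftModelForSeq2SeqLM(base_model, peft_config)

inputs = tokenizer("translate English to German: Hello, how are you?", return_tensors="pt")
# for prompt tuning, generate() above turns input_ids into inputs_embeds with the prompt prepended
output_ids = peft_model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```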
1514
+
1515
+ class PeftModelForTokenClassification(PeftModel):
1516
+ """
1517
+ Peft model for token classification tasks.
1518
+
1519
+ Args:
1520
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1521
+ peft_config ([`PeftConfig`]): Peft config.
1522
+
1523
+ **Attributes**:
1524
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1525
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
1526
+
1527
+ Example:
1528
+
1529
+ ```py
1530
+ >>> from transformers import AutoModelForTokenClassification
1531
+ >>> from peft import PeftModelForTokenClassification, get_peft_config
1532
+
1533
+ >>> config = {
1534
+ ... "peft_type": "PREFIX_TUNING",
1535
+ ... "task_type": "TOKEN_CLS",
1536
+ ... "inference_mode": False,
1537
+ ... "num_virtual_tokens": 20,
1538
+ ... "token_dim": 768,
1539
+ ... "num_transformer_submodules": 1,
1540
+ ... "num_attention_heads": 12,
1541
+ ... "num_layers": 12,
1542
+ ... "encoder_hidden_size": 768,
1543
+ ... "prefix_projection": False,
1544
+ ... "postprocess_past_key_value_function": None,
1545
+ ... }
1546
+
1547
+ >>> peft_config = get_peft_config(config)
1548
+ >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased")
1549
+ >>> peft_model = PeftModelForTokenClassification(model, peft_config)
1550
+ >>> peft_model.print_trainable_parameters()
1551
+ trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
1552
+ ```
1553
+ """
1554
+
1555
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default") -> None:
1556
+ super().__init__(model, peft_config, adapter_name)
1557
+ if self.modules_to_save is None:
1558
+ self.modules_to_save = {"classifier", "score"}
1559
+ else:
1560
+ self.modules_to_save.update({"classifier", "score"})
1561
+
1562
+ for name, _ in self.base_model.named_children():
1563
+ if any(module_name in name for module_name in self.modules_to_save):
1564
+ self.cls_layer_name = name
1565
+ break
1566
+
1567
+ # to make sure classifier layer is trainable
1568
+ _set_trainable(self, adapter_name)
1569
+
1570
+ def forward(
1571
+ self,
1572
+ input_ids=None,
1573
+ attention_mask=None,
1574
+ inputs_embeds=None,
1575
+ labels=None,
1576
+ output_attentions=None,
1577
+ output_hidden_states=None,
1578
+ return_dict=None,
1579
+ task_ids=None,
1580
+ **kwargs,
1581
+ ):
1582
+ peft_config = self.active_peft_config
1583
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1584
+
1585
+ if not peft_config.is_prompt_learning:
1586
+ with self._enable_peft_forward_hooks(**kwargs):
1587
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1588
+ if peft_config.peft_type == PeftType.POLY:
1589
+ kwargs["task_ids"] = task_ids
1590
+ return self.base_model(
1591
+ input_ids=input_ids,
1592
+ attention_mask=attention_mask,
1593
+ inputs_embeds=inputs_embeds,
1594
+ labels=labels,
1595
+ output_attentions=output_attentions,
1596
+ output_hidden_states=output_hidden_states,
1597
+ return_dict=return_dict,
1598
+ **kwargs,
1599
+ )
1600
+
1601
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1602
+ if attention_mask is not None:
1603
+ # concat prompt attention mask
1604
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1605
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1606
+ if kwargs.get("position_ids", None) is not None:
1607
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1608
+ kwargs["position_ids"] = None
1609
+ kwargs.update(
1610
+ {
1611
+ "attention_mask": attention_mask,
1612
+ "labels": labels,
1613
+ "output_attentions": output_attentions,
1614
+ "output_hidden_states": output_hidden_states,
1615
+ "return_dict": return_dict,
1616
+ }
1617
+ )
1618
+
1619
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1620
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
1621
+ else:
1622
+ if kwargs.get("token_type_ids", None) is not None:
1623
+ kwargs["token_type_ids"] = torch.cat(
1624
+ (
1625
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
1626
+ kwargs["token_type_ids"],
1627
+ ),
1628
+ dim=1,
1629
+ ).long()
1630
+ if inputs_embeds is None:
1631
+ inputs_embeds = self.word_embeddings(input_ids)
1632
+ prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
1633
+ prompts = prompts.to(inputs_embeds.dtype)
1634
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1635
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1636
+
1637
+ def _prefix_tuning_forward(
1638
+ self,
1639
+ input_ids=None,
1640
+ attention_mask=None,
1641
+ inputs_embeds=None,
1642
+ labels=None,
1643
+ output_attentions=None,
1644
+ output_hidden_states=None,
1645
+ return_dict=None,
1646
+ **kwargs,
1647
+ ):
1648
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1649
+ past_key_values = self.get_prompt(batch_size)
1650
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
1651
+ kwargs.update(
1652
+ {
1653
+ "input_ids": input_ids,
1654
+ "attention_mask": attention_mask,
1655
+ "inputs_embeds": inputs_embeds,
1656
+ "output_attentions": output_attentions,
1657
+ "output_hidden_states": output_hidden_states,
1658
+ "return_dict": return_dict,
1659
+ "past_key_values": past_key_values,
1660
+ }
1661
+ )
1662
+ if "past_key_values" in fwd_params:
1663
+ return self.base_model(labels=labels, **kwargs)
1664
+ else:
1665
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1666
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1667
+ if "past_key_values" not in fwd_params:
1668
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1669
+ outputs = transformer_backbone_name(**kwargs)
1670
+ sequence_output = outputs[0]
1671
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1672
+ sequence_output = self.base_model.dropout(sequence_output)
1673
+ logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
1674
+
1675
+ loss = None
1676
+ if labels is not None:
1677
+ loss_fct = CrossEntropyLoss()
1678
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1679
+
1680
+ if not return_dict:
1681
+ output = (logits,) + outputs[2:]
1682
+ return ((loss,) + output) if loss is not None else output
1683
+
1684
+ return TokenClassifierOutput(
1685
+ loss=loss,
1686
+ logits=logits,
1687
+ hidden_states=outputs.hidden_states,
1688
+ attentions=outputs.attentions,
1689
+ )
1690
+
1691
+
1692
+ class PeftModelForQuestionAnswering(PeftModel):
1693
+ """
1694
+ Peft model for extractive question answering.
1695
+
1696
+ Args:
1697
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1698
+ peft_config ([`PeftConfig`]): Peft config.
1699
+
1700
+ **Attributes**:
1701
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1702
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
1703
+
1704
+ Example:
1705
+
1706
+ ```py
1707
+ >>> from transformers import AutoModelForQuestionAnswering
1708
+ >>> from peft import PeftModelForQuestionAnswering, get_peft_config
1709
+
1710
+ >>> config = {
1711
+ ... "peft_type": "LORA",
1712
+ ... "task_type": "QUESTION_ANS",
1713
+ ... "inference_mode": False,
1714
+ ... "r": 16,
1715
+ ... "target_modules": ["query", "value"],
1716
+ ... "lora_alpha": 32,
1717
+ ... "lora_dropout": 0.05,
1718
+ ... "fan_in_fan_out": False,
1719
+ ... "bias": "none",
1720
+ ... }
1721
+
1722
+ >>> peft_config = get_peft_config(config)
1723
+ >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased")
1724
+ >>> peft_model = PeftModelForQuestionAnswering(model, peft_config)
1725
+ >>> peft_model.print_trainable_parameters()
1726
+ trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013
1727
+ ```
1728
+ """
1729
+
1730
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
1731
+ super().__init__(model, peft_config, adapter_name)
1732
+ if self.modules_to_save is None:
1733
+ self.modules_to_save = {"qa_outputs"}
1734
+ else:
1735
+ self.modules_to_save.update({"qa_outputs"})
1736
+
1737
+ for name, _ in self.base_model.named_children():
1738
+ if any(module_name in name for module_name in self.modules_to_save):
1739
+ self.cls_layer_name = name
1740
+ break
1741
+
1742
+ # to make sure classifier layer is trainable
1743
+ _set_trainable(self, adapter_name)
1744
+
1745
+ def forward(
1746
+ self,
1747
+ input_ids=None,
1748
+ attention_mask=None,
1749
+ token_type_ids=None,
1750
+ position_ids=None,
1751
+ inputs_embeds=None,
1752
+ start_positions=None,
1753
+ end_positions=None,
1754
+ output_attentions=None,
1755
+ output_hidden_states=None,
1756
+ return_dict=None,
1757
+ task_ids=None,
1758
+ **kwargs,
1759
+ ):
1760
+ peft_config = self.active_peft_config
1761
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1762
+
1763
+ if not peft_config.is_prompt_learning:
1764
+ if peft_config.peft_type == PeftType.POLY:
1765
+ kwargs["task_ids"] = task_ids
1766
+
1767
+ with self._enable_peft_forward_hooks(**kwargs):
1768
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1769
+ return self.base_model(
1770
+ input_ids=input_ids,
1771
+ attention_mask=attention_mask,
1772
+ inputs_embeds=inputs_embeds,
1773
+ start_positions=start_positions,
1774
+ end_positions=end_positions,
1775
+ output_attentions=output_attentions,
1776
+ output_hidden_states=output_hidden_states,
1777
+ return_dict=return_dict,
1778
+ **kwargs,
1779
+ )
1780
+
1781
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1782
+ if attention_mask is not None:
1783
+ # concat prompt attention mask
1784
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1785
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1786
+ if kwargs.get("position_ids", None) is not None:
1787
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1788
+ kwargs["position_ids"] = None
1789
+ kwargs.update(
1790
+ {
1791
+ "attention_mask": attention_mask,
1792
+ "start_positions": start_positions,
1793
+ "end_positions": end_positions,
1794
+ "output_attentions": output_attentions,
1795
+ "output_hidden_states": output_hidden_states,
1796
+ "return_dict": return_dict,
1797
+ }
1798
+ )
1799
+
1800
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1801
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
1802
+ else:
1803
+ if kwargs.get("token_type_ids", None) is not None:
1804
+ kwargs["token_type_ids"] = torch.cat(
1805
+ (
1806
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
1807
+ kwargs["token_type_ids"],
1808
+ ),
1809
+ dim=1,
1810
+ ).long()
1811
+ if inputs_embeds is None:
1812
+ inputs_embeds = self.word_embeddings(input_ids)
1813
+ prompts = self.get_prompt(batch_size=batch_size)
1814
+ prompts = prompts.to(inputs_embeds.dtype)
1815
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1816
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1817
+
1818
+ def _prefix_tuning_forward(
1819
+ self,
1820
+ input_ids=None,
1821
+ attention_mask=None,
1822
+ inputs_embeds=None,
1823
+ start_positions=None,
1824
+ end_positions=None,
1825
+ output_attentions=None,
1826
+ output_hidden_states=None,
1827
+ return_dict=None,
1828
+ **kwargs,
1829
+ ):
1830
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1831
+ past_key_values = self.get_prompt(batch_size)
1832
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
1833
+ kwargs.update(
1834
+ {
1835
+ "input_ids": input_ids,
1836
+ "attention_mask": attention_mask,
1837
+ "inputs_embeds": inputs_embeds,
1838
+ "output_attentions": output_attentions,
1839
+ "output_hidden_states": output_hidden_states,
1840
+ "return_dict": return_dict,
1841
+ "past_key_values": past_key_values,
1842
+ }
1843
+ )
1844
+ if "past_key_values" in fwd_params:
1845
+ return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs)
1846
+ else:
1847
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1848
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1849
+ if "past_key_values" not in fwd_params:
1850
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1851
+ outputs = transformer_backbone_name(**kwargs)
1852
+ sequence_output = outputs[0]
1853
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1854
+ sequence_output = self.base_model.dropout(sequence_output)
1855
+ logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
1856
+ start_logits, end_logits = logits.split(1, dim=-1)
1857
+ start_logits = start_logits.squeeze(-1).contiguous()
1858
+ end_logits = end_logits.squeeze(-1).contiguous()
1859
+
1860
+ total_loss = None
1861
+ if start_positions is not None and end_positions is not None:
1862
+ # If we are on multi-GPU, splitting across devices can add an extra dimension; squeeze it
1863
+ if len(start_positions.size()) > 1:
1864
+ start_positions = start_positions.squeeze(-1)
1865
+ if len(end_positions.size()) > 1:
1866
+ end_positions = end_positions.squeeze(-1)
1867
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1868
+ ignored_index = start_logits.size(1)
1869
+ start_positions = start_positions.clamp(0, ignored_index)
1870
+ end_positions = end_positions.clamp(0, ignored_index)
1871
+
1872
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1873
+ start_loss = loss_fct(start_logits, start_positions)
1874
+ end_loss = loss_fct(end_logits, end_positions)
1875
+ total_loss = (start_loss + end_loss) / 2
1876
+
1877
+ if not return_dict:
1878
+ output = (start_logits, end_logits) + outputs[2:]
1879
+ return ((total_loss,) + output) if total_loss is not None else output
1880
+
1881
+ return QuestionAnsweringModelOutput(
1882
+ loss=total_loss,
1883
+ start_logits=start_logits,
1884
+ end_logits=end_logits,
1885
+ hidden_states=outputs.hidden_states,
1886
+ attentions=outputs.attentions,
1887
+ )
1888
+
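The start/end loss above is the standard extractive-QA recipe: clamp out-of-range positions to an ignored index, then average two cross-entropy terms. A toy sketch with made-up logits and positions:

```py
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 2, 6
start_logits = torch.randn(batch_size, seq_len)
end_logits = torch.randn(batch_size, seq_len)
start_positions = torch.tensor([1, 9])  # 9 is out of range on purpose
end_positions = torch.tensor([3, 9])

# positions outside the sequence are clamped to seq_len and then ignored by the loss
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```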
1889
+
1890
+ class PeftModelForFeatureExtraction(PeftModel):
1891
+ """
1892
+ Peft model for extracting features/embeddings from transformer models
1893
+
1894
+ Args:
1895
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1896
+ peft_config ([`PeftConfig`]): Peft config.
1897
+
1898
+ **Attributes**:
1899
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1900
+
1901
+ Example:
1902
+
1903
+ ```py
1904
+ >>> from transformers import AutoModel
1905
+ >>> from peft import PeftModelForFeatureExtraction, get_peft_config
1906
+
1907
+ >>> config = {
1908
+ ... "peft_type": "LORA",
1909
+ ... "task_type": "FEATURE_EXTRACTION",
1910
+ ... "inference_mode": False,
1911
+ ... "r": 16,
1912
+ ... "target_modules": ["query", "value"],
1913
+ ... "lora_alpha": 32,
1914
+ ... "lora_dropout": 0.05,
1915
+ ... "fan_in_fan_out": False,
1916
+ ... "bias": "none",
1917
+ ... }
1918
+ >>> peft_config = get_peft_config(config)
1919
+ >>> model = AutoModel.from_pretrained("bert-base-cased")
1920
+ >>> peft_model = PeftModelForFeatureExtraction(model, peft_config)
1921
+ >>> peft_model.print_trainable_parameters()
1922
+ ```
1923
+ """
1924
+
1925
+ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default"):
1926
+ super().__init__(model, peft_config, adapter_name)
1927
+
1928
+ def forward(
1929
+ self,
1930
+ input_ids=None,
1931
+ attention_mask=None,
1932
+ inputs_embeds=None,
1933
+ output_attentions=None,
1934
+ output_hidden_states=None,
1935
+ return_dict=None,
1936
+ task_ids=None,
1937
+ **kwargs,
1938
+ ):
1939
+ peft_config = self.active_peft_config
1940
+ if not peft_config.is_prompt_learning:
1941
+ if peft_config.peft_type == PeftType.POLY:
1942
+ kwargs["task_ids"] = task_ids
1943
+
1944
+ with self._enable_peft_forward_hooks(**kwargs):
1945
+ kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
1946
+ return self.base_model(
1947
+ input_ids=input_ids,
1948
+ attention_mask=attention_mask,
1949
+ inputs_embeds=inputs_embeds,
1950
+ output_attentions=output_attentions,
1951
+ output_hidden_states=output_hidden_states,
1952
+ return_dict=return_dict,
1953
+ **kwargs,
1954
+ )
1955
+
1956
+ batch_size = _get_batch_size(input_ids, inputs_embeds)
1957
+ if attention_mask is not None:
1958
+ # concat prompt attention mask
1959
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1960
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1961
+
1962
+ if kwargs.get("position_ids", None) is not None:
1963
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1964
+ kwargs["position_ids"] = None
1965
+ if kwargs.get("token_type_ids", None) is not None:
1966
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
1967
+ kwargs["token_type_ids"] = None
1968
+ kwargs.update(
1969
+ {
1970
+ "attention_mask": attention_mask,
1971
+ "output_attentions": output_attentions,
1972
+ "output_hidden_states": output_hidden_states,
1973
+ "return_dict": return_dict,
1974
+ }
1975
+ )
1976
+
1977
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1978
+ past_key_values = self.get_prompt(batch_size)
1979
+ return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs)
1980
+ else:
1981
+ if inputs_embeds is None:
1982
+ inputs_embeds = self.word_embeddings(input_ids)
1983
+ prompts = self.get_prompt(batch_size=batch_size)
1984
+ prompts = prompts.to(inputs_embeds.dtype)
1985
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1986
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
env-llmeval/lib/python3.10/site-packages/peft/py.typed ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/peft/tuners/__init__.py ADDED
@@ -0,0 +1,32 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all
4
+
5
+ # coding=utf-8
6
+ # Copyright 2023-present the HuggingFace Inc. team.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel
21
+ from .lora import LoraConfig, LoraModel, LoftQConfig
22
+ from .loha import LoHaConfig, LoHaModel
23
+ from .lokr import LoKrConfig, LoKrModel
24
+ from .ia3 import IA3Config, IA3Model
25
+ from .adalora import AdaLoraConfig, AdaLoraModel
26
+ from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType
27
+ from .prefix_tuning import PrefixEncoder, PrefixTuningConfig
28
+ from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit
29
+ from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit
30
+ from .oft import OFTConfig, OFTModel
31
+ from .mixed import MixedModel
32
+ from .poly import PolyConfig, PolyModel
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py ADDED
@@ -0,0 +1,37 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
16
+
17
+ from .config import AdaLoraConfig
18
+ from .gptq import SVDQuantLinear
19
+ from .layer import AdaLoraLayer, RankAllocator, SVDLinear
20
+ from .model import AdaLoraModel
21
+
22
+
23
+ __all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"]
24
+
25
+
26
+ def __getattr__(name):
27
+ if (name == "SVDLinear8bitLt") and is_bnb_available():
28
+ from .bnb import SVDLinear8bitLt
29
+
30
+ return SVDLinear8bitLt
31
+
32
+ if (name == "SVDLinear4bit") and is_bnb_4bit_available():
33
+ from .bnb import SVDLinear4bit
34
+
35
+ return SVDLinear4bit
36
+
37
+ raise AttributeError(f"module {__name__} has no attribute {name}")
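The module-level `__getattr__` above is the usual PEP 562 pattern for exposing symbols only when an optional dependency (here bitsandbytes) is installed. A standalone sketch of the same idea, with a made-up module, helper, and class name:

```py
# hypothetical module: mypkg/optional.py
import importlib.util


def _has_numpy() -> bool:
    # stands in for is_bnb_available(); checks whether an optional dependency is importable
    return importlib.util.find_spec("numpy") is not None


def __getattr__(name):
    # resolved lazily, only when someone accesses mypkg.optional.ArrayHelper
    if name == "ArrayHelper" and _has_numpy():
        import numpy as np

        class ArrayHelper:
            def zeros(self, n):
                return np.zeros(n)

        return ArrayHelper
    raise AttributeError(f"module {__name__} has no attribute {name}")
```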
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (880 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc ADDED
Binary file (3.18 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc ADDED
Binary file (1.62 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc ADDED
Binary file (9.75 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py ADDED
@@ -0,0 +1,145 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any
16
+
17
+ import torch
18
+
19
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
20
+
21
+ from .layer import AdaLoraLayer
22
+
23
+
24
+ if is_bnb_available():
25
+
26
+ class SVDLinear8bitLt(torch.nn.Module, AdaLoraLayer):
27
+ # Low-rank matrix for SVD-based adaptation
28
+ def __init__(
29
+ self,
30
+ base_layer: torch.nn.Module,
31
+ adapter_name: str,
32
+ r: int = 0,
33
+ lora_alpha: int = 1,
34
+ lora_dropout: float = 0.0,
35
+ init_lora_weights: bool = True,
36
+ **kwargs,
37
+ ) -> None:
38
+ super().__init__()
39
+ AdaLoraLayer.__init__(self, base_layer)
40
+ # Freezing the pre-trained weight matrix
41
+ self.get_base_layer().weight.requires_grad = False
42
+
43
+ self._active_adapter = adapter_name
44
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
45
+
46
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
47
+ # note: no check for self.merged because merging is not supported (yet)
48
+ result = self.base_layer(x)
49
+
50
+ if self.disable_adapters:
51
+ return result
52
+
53
+ for active_adapter in self.active_adapters:
54
+ if active_adapter not in self.lora_A.keys():
55
+ continue
56
+ requires_conversion = not torch.is_autocast_enabled()
57
+ if requires_conversion:
58
+ expected_dtype = result.dtype
59
+ if x.dtype != torch.float32:
60
+ x = x.float()
61
+
62
+ lora_A = self.lora_A[active_adapter]
63
+ lora_B = self.lora_B[active_adapter]
64
+ lora_E = self.lora_E[active_adapter]
65
+ dropout = self.lora_dropout[active_adapter]
66
+ scaling = self.scaling[active_adapter]
67
+ ranknum = self.ranknum[active_adapter] + 1e-5
68
+
69
+ output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
70
+ if requires_conversion:
71
+ output = output.to(expected_dtype)
72
+ output = output * scaling / ranknum
73
+ # inplace operation on view is forbidden for MatMul8bitLtBackward, so avoid it
74
+ result = result + output
75
+ return result
76
+
77
+ def __repr__(self) -> str:
78
+ rep = super().__repr__()
79
+ return "adalora." + rep
80
+
81
+
82
+ if is_bnb_4bit_available():
83
+
84
+ class SVDLinear4bit(torch.nn.Module, AdaLoraLayer):
85
+ # Low-rank matrix for SVD-based adaptation
86
+ def __init__(
87
+ self,
88
+ base_layer: torch.nn.Module,
89
+ adapter_name: str,
90
+ r: int = 0,
91
+ lora_alpha: int = 1,
92
+ lora_dropout: float = 0.0,
93
+ init_lora_weights: bool = True,
94
+ **kwargs,
95
+ ) -> None:
96
+ super().__init__()
97
+ AdaLoraLayer.__init__(self, base_layer)
98
+ # Freezing the pre-trained weight matrix
99
+ self.get_base_layer().weight.requires_grad = False
100
+
101
+ self._active_adapter = adapter_name
102
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
103
+
104
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
105
+ # note: no check for self.merged because merging is not supported (yet)
106
+ result = self.base_layer(x, *args, **kwargs)
107
+
108
+ if self.disable_adapters:
109
+ return result
110
+
111
+ # As per Tim Dettmers, for 4bit, we need to defensively clone here.
112
+ # The reason is that in some cases, an error can occur that backprop
113
+ # does not work on a manipulated view. This issue may be solved with
114
+ # newer PyTorch versions but this would need extensive testing to be
115
+ # sure.
116
+ result = result.clone()
117
+
118
+ for active_adapter in self.active_adapters:
119
+ if active_adapter not in self.lora_A.keys():
120
+ continue
121
+
122
+ lora_A = self.lora_A[active_adapter]
123
+ lora_B = self.lora_B[active_adapter]
124
+ lora_E = self.lora_E[active_adapter]
125
+ dropout = self.lora_dropout[active_adapter]
126
+ scaling = self.scaling[active_adapter]
127
+ ranknum = self.ranknum[active_adapter] + 1e-5
128
+
129
+ requires_conversion = not torch.is_autocast_enabled()
130
+ if requires_conversion:
131
+ expected_dtype = result.dtype
132
+ compute_dtype = lora_A.dtype
133
+ if x.dtype != compute_dtype:
134
+ x = x.to(compute_dtype)
135
+
136
+ output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
137
+ if requires_conversion:
138
+ output = output.to(expected_dtype)
139
+ output = output * scaling / ranknum
140
+ result += output
141
+ return result
142
+
143
+ def __repr__(self) -> str:
144
+ rep = super().__repr__()
145
+ return "adalora." + rep
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/config.py ADDED
@@ -0,0 +1,52 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+ from typing import Optional
17
+
18
+ from peft.tuners.lora import LoraConfig
19
+ from peft.utils import PeftType
20
+
21
+
22
+ @dataclass
23
+ class AdaLoraConfig(LoraConfig):
24
+ """
25
+ This is the configuration class to store the configuration of a [`~peft.AdaLora`].
26
+
27
+ Args:
28
+ target_r (`int`): The target average rank of incremental matrix.
29
+ init_r (`int`): The initial rank for each incremental matrix.
30
+ tinit (`int`): The steps of initial fine-tuning warmup.
31
+ tfinal (`int`): The step of final fine-tuning.
32
+ deltaT (`int`): The time interval between two budget allocations.
33
+ beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.
34
+ beta2 (`float`): The hyperparameter of EMA for uncertainty quantification.
35
+ orth_reg_weight (`float`): The coefficient of orthogonal regularization.
36
+ total_step (`int`): The total training steps that should be specified before training.
37
+ rank_pattern (`dict`): The rank allocated to each weight matrix by RankAllocator.
38
+ """
39
+
40
+ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."})
41
+ init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."})
42
+ tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."})
43
+ tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."})
44
+ deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."})
45
+ beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
46
+ beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
47
+ orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."})
48
+ total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."})
49
+ rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."})
50
+
51
+ def __post_init__(self):
52
+ self.peft_type = PeftType.ADALORA
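A hedged usage sketch for `AdaLoraConfig`; the base checkpoint, target modules, and schedule values are illustrative, and any `LoraConfig` argument is also accepted since `AdaLoraConfig` subclasses it:

```py
from transformers import AutoModelForSeq2SeqLM
from peft import AdaLoraConfig, TaskType, get_peft_model

# illustrative base model and target modules
base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

config = AdaLoraConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    target_modules=["q", "v"],  # T5 attention projections
    init_r=12,        # starting rank per matrix
    target_r=4,       # target average rank after pruning
    tinit=200,        # warmup steps before rank pruning starts
    tfinal=500,       # final fine-tuning steps at the pruned ranks
    deltaT=10,        # budget reallocation interval
    total_step=3000,  # should match the planned number of training steps
)

model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```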
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py ADDED
@@ -0,0 +1,72 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+
16
+ from .layer import AdaLoraLayer
17
+
18
+
19
+ class SVDQuantLinear(torch.nn.Module, AdaLoraLayer):
20
+ def __init__(
21
+ self,
22
+ base_layer,
23
+ adapter_name,
24
+ r: int = 0,
25
+ lora_alpha: int = 1,
26
+ lora_dropout: float = 0.0,
27
+ init_lora_weights: bool = True,
28
+ **kwargs,
29
+ ) -> None:
30
+ super().__init__()
31
+ AdaLoraLayer.__init__(self, base_layer)
32
+
33
+ # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
34
+ # for backwards compatibility
35
+ self.quant_linear_module = base_layer
36
+ self._active_adapter = adapter_name
37
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
38
+
39
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
40
+ result = self.quant_linear_module(x)
41
+
42
+ if self.disable_adapters:
43
+ return result
44
+
45
+ for active_adapter in self.active_adapters:
46
+ if active_adapter not in self.lora_A.keys():
47
+ continue
48
+ lora_A = self.lora_A[active_adapter]
49
+ lora_B = self.lora_B[active_adapter]
50
+ lora_E = self.lora_E[active_adapter]
51
+ dropout = self.lora_dropout[active_adapter]
52
+ scaling = self.scaling[active_adapter]
53
+ ranknum = self.ranknum[active_adapter] + 1e-5
54
+
55
+ requires_conversion = not torch.is_autocast_enabled()
56
+ if requires_conversion:
57
+ expected_dtype = result.dtype
58
+ if x.dtype != torch.float32:
59
+ x = x.float()
60
+
61
+ output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
62
+ # TODO: here, the dtype conversion is applied on the *whole expression*,
63
+ # not the intermediate result, unlike for SVDLinear8bitLT and
64
+ # SVDLinear4bit, is that correct?
65
+ if requires_conversion:
66
+ output = output.to(expected_dtype)
67
+ result += output
68
+ return result
69
+
70
+ def __repr__(self) -> str:
71
+ rep = super().__repr__()
72
+ return "adalora." + rep
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/layer.py ADDED
@@ -0,0 +1,347 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from typing import Any, List, Optional
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+ from peft.tuners.lora import LoraLayer
22
+ from peft.tuners.tuners_utils import check_adapters_to_merge
23
+ from peft.utils import transpose
24
+
25
+
26
+ class AdaLoraLayer(LoraLayer):
27
+ # List all names of layers that may contain adapter weights
28
+ # Note: ranknum doesn't need to be included as it is not an nn.Module
29
+ adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B")
30
+ # other_param_names is defined in LoraLayer
31
+
32
+ def __init__(self, base_layer: nn.Module) -> None:
33
+ super().__init__(base_layer)
34
+ self.lora_E = nn.ParameterDict({})
35
+ self.lora_A = nn.ParameterDict({})
36
+ self.lora_B = nn.ParameterDict({})
37
+ self.ranknum = nn.ParameterDict({})
38
+
39
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
40
+ if r < 0:
41
+ # note: r == 0 is allowed for AdaLora, see #1539
42
+ raise ValueError(f"`r` should be a positive integer or 0, but the value passed is {r}")
43
+
44
+ self.r[adapter_name] = r
45
+ self.lora_alpha[adapter_name] = lora_alpha
46
+ if lora_dropout > 0.0:
47
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
48
+ else:
49
+ lora_dropout_layer = nn.Identity()
50
+
51
+ self.lora_dropout[adapter_name] = lora_dropout_layer
52
+ # Actual trainable parameters
53
+ # Right singular vectors
54
+ self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features))
55
+ # Singular values
56
+ self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1))
57
+ # Left singular vectors
58
+ self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r))
59
+ # The current rank
60
+ self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False)
61
+ self.ranknum[adapter_name].data.fill_(float(r))
62
+ self.ranknum[adapter_name].requires_grad = False
63
+ self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
64
+ if init_lora_weights:
65
+ self.reset_lora_parameters(adapter_name)
66
+
67
+ if hasattr(self.get_base_layer(), "qweight"):
68
+ # QuantLinear
69
+ self.to(self.get_base_layer().qweight.device)
70
+ else:
71
+ self.to(self.get_base_layer().weight.device)
72
+ self.set_adapter(self.active_adapters)
73
+
74
+ def reset_lora_parameters(self, adapter_name):
75
+ if adapter_name in self.lora_A.keys():
76
+ nn.init.normal_(self.lora_E[adapter_name], mean=0.0, std=0.02)
77
+ nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
78
+ nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
79
+
80
+
81
+ class SVDLinear(nn.Module, AdaLoraLayer):
82
+ # SVD-based adaptation by a dense layer
83
+ def __init__(
84
+ self,
85
+ base_layer: nn.Module,
86
+ adapter_name: str,
87
+ r: int = 0,
88
+ lora_alpha: int = 1,
89
+ lora_dropout: float = 0.0,
90
+ fan_in_fan_out: bool = False,
91
+ init_lora_weights: bool = True,
92
+ **kwargs,
93
+ ) -> None:
94
+ super().__init__()
95
+ AdaLoraLayer.__init__(self, base_layer)
96
+ # Freezing the pre-trained weight matrix
97
+ self.get_base_layer().weight.requires_grad = False
98
+
99
+ self.fan_in_fan_out = fan_in_fan_out
100
+ self._active_adapter = adapter_name
101
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
102
+
103
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
104
+ """
105
+ Merge the active adapter weights into the base weights
106
+
107
+ Args:
108
+ safe_merge (`bool`, *optional*):
109
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
110
+ before merging the weights. This is useful if you want to check if the merge operation will produce
111
+ NaNs. Defaults to `False`.
112
+ adapter_names (`List[str]`, *optional*):
113
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
114
+ to `None`.
115
+ """
116
+ adapter_names = check_adapters_to_merge(self, adapter_names)
117
+ if not adapter_names:
118
+ # no adapter to merge
119
+ return
120
+
121
+ for active_adapter in adapter_names:
122
+ base_layer = self.get_base_layer()
123
+ if active_adapter in self.lora_A.keys():
124
+ if safe_merge:
125
+ # Note that safe_merge will be slower than the normal merge
126
+ # because of the copy operation.
127
+ orig_weights = base_layer.weight.data.clone()
128
+ orig_weights += self.get_delta_weight(active_adapter)
129
+
130
+ if not torch.isfinite(orig_weights).all():
131
+ raise ValueError(
132
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
133
+ )
134
+
135
+ base_layer.weight.data = orig_weights
136
+ else:
137
+ base_layer.weight.data += self.get_delta_weight(active_adapter)
138
+ self.merged_adapters.append(active_adapter)
139
+
140
+ def unmerge(self) -> None:
141
+ """
142
+ This method unmerges all merged adapter layers from the base weights.
143
+ """
144
+ if not self.merged:
145
+ warnings.warn("Already unmerged. Nothing to do.")
146
+ return
147
+ while len(self.merged_adapters) > 0:
148
+ active_adapter = self.merged_adapters.pop()
149
+ if active_adapter in self.lora_A.keys():
150
+ self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
151
+
152
+ def get_delta_weight(self, adapter) -> torch.Tensor:
153
+ return (
154
+ transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out)
155
+ * self.scaling[adapter]
156
+ / (self.ranknum[adapter] + 1e-5)
157
+ )
158
+
159
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
160
+ if self.disable_adapters:
161
+ if self.merged:
162
+ self.unmerge()
163
+ result = self.base_layer(x, *args, **kwargs)
164
+ elif self.merged:
165
+ result = self.base_layer(x, *args, **kwargs)
166
+ else:
167
+ result = self.base_layer(x, *args, **kwargs)
168
+ for active_adapter in self.active_adapters:
169
+ if active_adapter not in self.lora_A.keys():
170
+ continue
171
+ lora_A = self.lora_A[active_adapter]
172
+ lora_B = self.lora_B[active_adapter]
173
+ lora_E = self.lora_E[active_adapter]
174
+ dropout = self.lora_dropout[active_adapter]
175
+ scaling = self.scaling[active_adapter]
176
+ ranknum = self.ranknum[active_adapter] + 1e-5
177
+
178
+ x = x.to(lora_A.dtype)
179
+ result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
180
+
181
+ return result
182
+
183
+ def __repr__(self) -> str:
184
+ rep = super().__repr__()
185
+ return "adalora." + rep
186
+
187
+
188
+ class RankAllocator:
189
+ """
190
+ The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY
191
+
192
+ Args:
193
+ config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
194
+ model: the model that we apply AdaLoRA to.
195
+
196
+ """
197
+
198
+ def __init__(self, model, peft_config, adapter_name):
199
+ self.peft_config = peft_config
200
+ self.adapter_name = adapter_name
201
+ self.beta1 = peft_config.beta1
202
+ self.beta2 = peft_config.beta2
203
+ assert self.beta1 > 0 and self.beta1 < 1
204
+ assert self.beta2 > 0 and self.beta2 < 1
205
+
206
+ self.reset_ipt()
207
+ self._set_budget_scheduler(model)
208
+
209
+ def set_total_step(self, total_step):
210
+ self.peft_config.total_step = total_step
211
+
212
+ def reset_ipt(self):
213
+ self.ipt = {}
214
+ self.exp_avg_ipt = {}
215
+ self.exp_avg_unc = {}
216
+
217
+ def _set_budget_scheduler(self, model):
218
+ self.init_bgt = 0
219
+ self.name_set = set()
220
+ for n, p in model.named_parameters():
221
+ if f"lora_A.{self.adapter_name}" in n:
222
+ self.init_bgt += p.size(0)
223
+ self.name_set.add(n.replace("lora_A", "%s"))
224
+ self.name_set = sorted(self.name_set)
225
+ # The total final rank budget
226
+ self.target_bgt = self.peft_config.target_r * len(self.name_set)
227
+
228
+ def budget_schedule(self, step: int):
229
+ tinit = self.peft_config.tinit
230
+ tfinal = self.peft_config.tfinal
231
+ total_step = self.peft_config.total_step
232
+ # Initial warmup
233
+ if step <= tinit:
234
+ budget = self.init_bgt
235
+ mask_ind = False
236
+ # Final fine-tuning
237
+ elif step > total_step - tfinal:
238
+ budget = self.target_bgt
239
+ mask_ind = True
240
+ else:
241
+ # Budget decreasing with a cubic scheduler
242
+ mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
243
+ budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt)
244
+ mask_ind = True if step % self.peft_config.deltaT == 0 else False
245
+ return budget, mask_ind
246
+
247
+ def update_ipt(self, model):
248
+ # Update the sensitivity and uncertainty for every weight
249
+ for n, p in model.named_parameters():
250
+ if "lora_" in n and self.adapter_name in n:
251
+ if n not in self.ipt:
252
+ self.ipt[n] = torch.zeros_like(p)
253
+ self.exp_avg_ipt[n] = torch.zeros_like(p)
254
+ self.exp_avg_unc[n] = torch.zeros_like(p)
255
+ with torch.no_grad():
256
+ self.ipt[n] = (p * p.grad).abs().detach()
257
+ # Sensitivity smoothing
258
+ self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n]
259
+ # Uncertainty quantification
260
+ self.exp_avg_unc[n] = (
261
+ self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs()
262
+ )
263
+
264
+ def _element_score(self, n):
265
+ return self.exp_avg_ipt[n] * self.exp_avg_unc[n]
266
+
267
+ def _combine_ipt(self, ipt_E, ipt_AB):
268
+ ipt_AB = ipt_AB.sum(dim=1, keepdim=False)
269
+ sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1)
270
+ return sum_ipt
271
+
272
+ def mask_to_budget(self, model, budget):
273
+ value_ipt = {}
274
+ vector_ipt = {}
275
+ triplet_ipt = {}
276
+ # Get the importance score for A, E, B
277
+ for n, p in model.named_parameters():
278
+ if f"lora_A.{self.adapter_name}" in n:
279
+ entry_ipt = self._element_score(n)
280
+ comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True)
281
+ name_m = n.replace("lora_A", "%s")
282
+ if name_m not in vector_ipt:
283
+ vector_ipt[name_m] = [comb_ipt]
284
+ else:
285
+ vector_ipt[name_m].append(comb_ipt)
286
+ if f"lora_B.{self.adapter_name}" in n:
287
+ entry_ipt = self._element_score(n)
288
+ comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1)
289
+ name_m = n.replace("lora_B", "%s")
290
+ if name_m not in vector_ipt:
291
+ vector_ipt[name_m] = [comb_ipt]
292
+ else:
293
+ vector_ipt[name_m].append(comb_ipt)
294
+ if f"lora_E.{self.adapter_name}" in n:
295
+ entry_ipt = self._element_score(n)
296
+ name_m = n.replace("lora_E", "%s")
297
+ value_ipt[name_m] = entry_ipt
298
+
299
+ all_score = []
300
+ # Calculate the score for each triplet
301
+ for name_m in vector_ipt:
302
+ ipt_E = value_ipt[name_m]
303
+ ipt_AB = torch.cat(vector_ipt[name_m], dim=1)
304
+ sum_ipt = self._combine_ipt(ipt_E, ipt_AB)
305
+ name_E = name_m % "lora_E"
306
+ triplet_ipt[name_E] = sum_ipt.view(-1, 1)
307
+ all_score.append(sum_ipt.view(-1))
308
+
309
+ # Get the threshold by ranking ipt
310
+ mask_threshold = torch.kthvalue(
311
+ torch.cat(all_score),
312
+ k=self.init_bgt - budget,
313
+ )[0].item()
314
+
315
+ rank_pattern = {}
316
+ # Mask the unimportant triplets
317
+ with torch.no_grad():
318
+ for n, p in model.named_parameters():
319
+ if f"lora_E.{self.adapter_name}" in n:
320
+ p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0)
321
+ rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist()
322
+ return rank_pattern
323
+
324
+ def update_and_allocate(self, model, global_step, force_mask=False):
325
+ # # Update the importance score and allocate the budget
326
+ if global_step < self.peft_config.total_step - self.peft_config.tfinal:
327
+ self.update_ipt(model)
328
+ budget, mask_ind = self.budget_schedule(global_step)
329
+ # Allocate the budget according to importance scores
330
+ if mask_ind or force_mask:
331
+ rank_pattern = self.mask_to_budget(model, budget)
332
+ else:
333
+ rank_pattern = None
334
+ return budget, rank_pattern
335
+
336
+ def mask_using_rank_pattern(self, model, rank_pattern):
337
+ # Mask the unimportant triplets
338
+ is_adapter_name_truncated = False
339
+ if self.adapter_name not in next(iter(rank_pattern.keys())):
340
+ is_adapter_name_truncated = True
341
+
342
+ with torch.no_grad():
343
+ for n, p in model.named_parameters():
344
+ if f"lora_E.{self.adapter_name}" in n:
345
+ key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "")
346
+ mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device)
347
+ p.masked_fill_(~mask.bool(), 0.0)
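The budget_schedule method above keeps the full budget during the first tinit warmup steps, shrinks it with a cubic schedule, and holds the target budget for the last tfinal steps. A standalone sketch of that schedule (the budgets and step counts below are made-up numbers):

```python
def cubic_budget(step, tinit, tfinal, total_step, init_bgt, target_bgt):
    if step <= tinit:                      # initial warmup: full budget
        return init_bgt
    if step > total_step - tfinal:         # final fine-tuning: target budget
        return target_bgt
    mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
    return int((init_bgt - target_bgt) * (mul_coeff**3) + target_bgt)

# e.g. 12 matrices starting at rank 12 (budget 144) pruned toward rank 8 (budget 96)
print([cubic_budget(s, 200, 500, 3000, 144, 96) for s in (0, 200, 1000, 2400, 2600)])
```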
env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/model.py ADDED
@@ -0,0 +1,346 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+
17
+ import torch
18
+ from transformers.pytorch_utils import Conv1D
19
+
20
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
21
+ from peft.tuners.lora import LoraConfig, LoraModel
22
+ from peft.tuners.tuners_utils import BaseTunerLayer
23
+ from peft.utils import (
24
+ TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
25
+ _freeze_adapter,
26
+ _get_submodules,
27
+ get_auto_gptq_quant_linear,
28
+ get_quantization_config,
29
+ )
30
+
31
+ from .gptq import SVDQuantLinear
32
+ from .layer import AdaLoraLayer, RankAllocator, SVDLinear
33
+
34
+
35
+ class AdaLoraModel(LoraModel):
36
+ """
37
+ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
38
+ https://openreview.net/forum?id=lq62uWRJjiY
39
+
40
+ Args:
41
+ model ([`transformers.PreTrainedModel`]): The model to be adapted.
42
+ config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
43
+ adapter_name (`str`): The name of the adapter, defaults to `"default"`.
44
+
45
+ Returns:
46
+ `torch.nn.Module`: The AdaLora model.
47
+
48
+ Example::
49
+
50
+ >>> from transformers import AutoModelForSeq2SeqLM
+ >>> from peft import AdaLoraModel, AdaLoraConfig
51
+ >>> config = AdaLoraConfig(
52
+ peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
53
+ lora_dropout=0.01,
54
+ )
55
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+ >>> model = AdaLoraModel(model, config, "default")
56
+
57
+ **Attributes**:
58
+ - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
59
+ - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
60
+ """
61
+
62
+ # Note: don't redefine prefix here, it should be inherited from LoraModel
63
+
64
+ def __init__(self, model, config, adapter_name):
65
+ super().__init__(model, config, adapter_name)
66
+
67
+ trainable_mode_counter = 0
68
+ for config in self.peft_config.values():
69
+ if not config.inference_mode:
70
+ trainable_mode_counter += 1
71
+
72
+ if trainable_mode_counter > 1:
73
+ raise ValueError(
74
+ "AdaLoraModel supports only 1 trainable adapter. "
75
+ "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
76
+ )
77
+
78
+ if self.peft_config[adapter_name].inference_mode:
79
+ _freeze_adapter(self.model, adapter_name)
80
+ else:
81
+ self.trainable_adapter_name = adapter_name
82
+ self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
83
+
84
+ def _check_new_adapter_config(self, config: LoraConfig) -> None:
85
+ """
86
+ A helper method to check the config when a new adapter is being added.
87
+
88
+ Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
89
+
90
+ """
91
+ super()._check_new_adapter_config(config)
92
+
93
+ trainable_mode_counter = 0
94
+ for config_ in self.peft_config.values():
95
+ if not config_.inference_mode:
96
+ trainable_mode_counter += 1
97
+
98
+ if trainable_mode_counter > 1:
99
+ raise ValueError(
100
+ f"{self.__class__.__name__} supports only 1 trainable adapter. "
101
+ "When using multiple adapters, set inference_mode to True for all adapters except the one "
102
+ "you want to train."
103
+ )
104
+
105
+ def _create_and_replace(
106
+ self,
107
+ lora_config,
108
+ adapter_name,
109
+ target,
110
+ target_name,
111
+ parent,
112
+ current_key,
113
+ ):
114
+ kwargs = {
115
+ "r": lora_config.init_r,
116
+ "lora_alpha": lora_config.lora_alpha,
117
+ "lora_dropout": lora_config.lora_dropout,
118
+ "fan_in_fan_out": lora_config.fan_in_fan_out,
119
+ "init_lora_weights": lora_config.init_lora_weights,
120
+ "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
121
+ "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
122
+ }
123
+ if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available():
124
+ raise ImportError(
125
+ "To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. "
126
+ "You can install it with `pip install bitsandbytes`."
127
+ )
128
+
129
+ quantization_config = get_quantization_config(self.model, method="gptq")
130
+ if quantization_config is not None:
131
+ kwargs["gptq_quantization_config"] = quantization_config
132
+
133
+ # If it is not an AdaLoraLayer, create a new module, else update it with new adapters
134
+ if not isinstance(target, AdaLoraLayer):
135
+ new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
136
+ if adapter_name != self.active_adapter:
137
+ # adding an additional adapter: it is not automatically trainable
138
+ new_module.requires_grad_(False)
139
+ self._replace_module(parent, target_name, new_module, target)
140
+ else:
141
+ target.update_layer(
142
+ adapter_name,
143
+ lora_config.init_r,
144
+ lora_config.lora_alpha,
145
+ lora_config.lora_dropout,
146
+ lora_config.init_lora_weights,
147
+ )
148
+
149
+ @staticmethod
150
+ def _create_new_module(lora_config, adapter_name, target, **kwargs):
151
+ # avoid eager bnb import
152
+ if is_bnb_available():
153
+ import bitsandbytes as bnb
154
+
155
+ from .bnb import SVDLinear8bitLt
156
+ if is_bnb_4bit_available():
157
+ from .bnb import SVDLinear4bit
158
+
159
+ gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
160
+ AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
161
+
162
+ loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
163
+ loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
164
+
165
+ if isinstance(target, BaseTunerLayer):
166
+ target_base_layer = target.get_base_layer()
167
+ else:
168
+ target_base_layer = target
169
+
170
+ if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
171
+ kwargs.update(
172
+ {
173
+ "has_fp16_weights": target_base_layer.state.has_fp16_weights,
174
+ "memory_efficient_backward": target_base_layer.state.memory_efficient_backward,
175
+ "threshold": target_base_layer.state.threshold,
176
+ "index": target_base_layer.index,
177
+ }
178
+ )
179
+ new_module = SVDLinear8bitLt(target, adapter_name, **kwargs)
180
+ elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
181
+ fourbit_kwargs = kwargs.copy()
182
+ fourbit_kwargs.update(
183
+ {
184
+ "compute_dtype": target_base_layer.compute_dtype,
185
+ "compress_statistics": target_base_layer.weight.compress_statistics,
186
+ "quant_type": target_base_layer.weight.quant_type,
187
+ }
188
+ )
189
+ new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs)
190
+ elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear):
191
+ new_module = SVDQuantLinear(target, adapter_name, **kwargs)
192
+ else:
193
+ if isinstance(target_base_layer, torch.nn.Linear):
194
+ if kwargs["fan_in_fan_out"]:
195
+ warnings.warn(
196
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
197
+ "Setting fan_in_fan_out to False."
198
+ )
199
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
200
+ elif isinstance(target_base_layer, Conv1D):
201
+ if not kwargs["fan_in_fan_out"]:
202
+ warnings.warn(
203
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. "
204
+ "Setting fan_in_fan_out to True."
205
+ )
206
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
207
+ else:
208
+ raise ValueError(
209
+ f"Target module {target} is not supported. "
210
+ f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
211
+ )
212
+ new_module = SVDLinear(target, adapter_name, **kwargs)
213
+
214
+ return new_module
215
+
216
+ @staticmethod
217
+ def _prepare_adapter_config(peft_config, model_config):
218
+ if peft_config.target_modules is None:
219
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
220
+ raise ValueError("Please specify `target_modules` in `peft_config`")
221
+ peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
222
+ model_config["model_type"]
223
+ ]
224
+ return peft_config
225
+
226
+ def __getattr__(self, name: str):
227
+ """Forward missing attributes to the wrapped module."""
228
+ try:
229
+ return super().__getattr__(name) # defer to nn.Module's logic
230
+ except AttributeError:
231
+ return getattr(self.model, name)
232
+
233
+ def forward(self, *args, **kwargs):
234
+ outputs = self.model.forward(*args, **kwargs)
235
+
236
+ if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor):
237
+ # Calculate the orthogonal regularization
238
+ orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
239
+
240
+ if orth_reg_weight <= 0:
241
+ raise ValueError("orth_reg_weight should be greater than 0. ")
242
+
243
+ regu_loss = 0
244
+ num_param = 0
245
+ for n, p in self.model.named_parameters():
246
+ if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
247
+ para_cov = p @ p.T if "lora_A" in n else p.T @ p
248
+ I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) # noqa: E741
249
+ I.requires_grad = False
250
+ num_param += 1
251
+ regu_loss += torch.norm(para_cov - I, p="fro")
252
+ if num_param > 0:
253
+ regu_loss = regu_loss / num_param
254
+ else:
255
+ regu_loss = 0
256
+ outputs.loss += orth_reg_weight * regu_loss
257
+ return outputs
258
+
259
+ def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
260
+ lora_config = self.peft_config[adapter_name]
261
+ for name, rank_idx in rank_pattern.items():
262
+ if isinstance(rank_idx, list):
263
+ rank = sum(rank_idx)
264
+ elif isinstance(rank_idx, torch.Tensor):
265
+ rank_idx = rank_idx.view(-1)
266
+ rank = rank_idx.sum().item()
267
+ else:
268
+ raise ValueError("Unexpected type of rank_idx")
269
+ key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
270
+ _, target, _ = _get_submodules(self.model, key)
271
+ lora_E_weights = target.lora_E[adapter_name][rank_idx]
272
+ lora_A_weights = target.lora_A[adapter_name][rank_idx]
273
+ lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
274
+ ranknum = target.ranknum[adapter_name]
275
+ target.update_layer(
276
+ adapter_name,
277
+ rank,
278
+ lora_config.lora_alpha,
279
+ lora_config.lora_dropout,
280
+ lora_config.init_lora_weights,
281
+ )
282
+ with torch.no_grad():
283
+ if rank > 0:
284
+ target.lora_E[adapter_name].copy_(lora_E_weights)
285
+ target.lora_A[adapter_name].copy_(lora_A_weights)
286
+ target.lora_B[adapter_name].copy_(lora_B_weights)
287
+ # The scaling is exactly as the previous
288
+ target.ranknum[adapter_name].copy_(ranknum)
289
+
290
+ def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
291
+ for name, rank_idx in rank_pattern.items():
292
+ rank = sum(rank_idx)
293
+ prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
294
+ for layer in ["lora_E", "lora_A", "lora_B"]:
295
+ key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
296
+ if layer != "lora_B":
297
+ state_dict[key] = (
298
+ state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
299
+ )
300
+ else:
301
+ state_dict[key] = (
302
+ state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
303
+ )
304
+ return state_dict
305
+
306
+ def update_and_allocate(self, global_step):
307
+ """
308
+ This method updates Adalora budget and mask.
309
+
310
+ This should be called in every training step after `loss.backward()` and before `zero_grad()`.
311
+
312
+ `tinit`, `tfinal` and `deltaT` are handled within the method.
313
+
314
+ Args:
315
+ global_step (`int`): The current training step; it is used to calculate the AdaLora budget.
316
+
317
+ Example:
318
+
319
+ ```python
320
+ >>> loss = model(**input).loss
321
+ >>> loss.backward()
322
+ >>> optimizer.step()
323
+ >>> model.base_model.update_and_allocate(i_step)
324
+ >>> optimizer.zero_grad()
325
+ ```
326
+ """
327
+ lora_config = self.peft_config[self.trainable_adapter_name]
328
+ # Update the importance score and allocate the budget
329
+ if global_step < lora_config.total_step - lora_config.tfinal:
330
+ _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
331
+ if rank_pattern:
332
+ lora_config.rank_pattern = rank_pattern
333
+ # Finalize the budget allocation
334
+ elif global_step == lora_config.total_step - lora_config.tfinal:
335
+ _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
336
+ # for some reason, this freezes the trainable parameters and nothing gets updated
337
+ # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
338
+ lora_config.rank_pattern = rank_pattern
339
+ self.rankallocator.reset_ipt()
340
+ # Currently using inefficient way to mask the unimportant weights using the rank pattern
341
+ # due to problem mentioned above
342
+ elif global_step > lora_config.total_step - lora_config.tfinal:
343
+ self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
344
+ # Pass the function and do forward propagation
345
+ else:
346
+ return None
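The orthogonal regularization added to the loss in AdaLoraModel.forward above penalizes ||P P^T - I||_F for each lora_A and ||P^T P - I||_F for each lora_B. A self-contained sketch of that term, using made-up adapter shapes and an assumed orth_reg_weight of 0.5:

```python
import torch

def orth_reg(lora_A, lora_B):
    reg, num = 0.0, 0
    for p, is_A in ((lora_A, True), (lora_B, False)):
        para_cov = p @ p.T if is_A else p.T @ p                      # (r, r)
        eye = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
        reg = reg + torch.norm(para_cov - eye, p="fro")
        num += 1
    return reg / num

lora_A = torch.randn(4, 16)   # (r, in_features)
lora_B = torch.randn(32, 4)   # (out_features, r)
extra_loss = 0.5 * orth_reg(lora_A, lora_B)   # added on top of the task loss
```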
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py ADDED
@@ -0,0 +1,36 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
16
+
17
+ from .config import IA3Config
18
+ from .layer import Conv2d, IA3Layer, Linear
19
+ from .model import IA3Model
20
+
21
+
22
+ __all__ = ["Conv2d", "IA3Config", "IA3Layer", "IA3Model", "Linear"]
23
+
24
+
25
+ def __getattr__(name):
26
+ if (name == "Linear8bitLt") and is_bnb_available():
27
+ from .bnb import Linear8bitLt
28
+
29
+ return Linear8bitLt
30
+
31
+ if (name == "Linear4bit") and is_bnb_4bit_available():
32
+ from .bnb import Linear4bit
33
+
34
+ return Linear4bit
35
+
36
+ raise AttributeError(f"module {__name__} has no attribute {name}")
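The module-level __getattr__ above lazily resolves the bitsandbytes-backed classes, so bitsandbytes is imported only when one of those names is actually requested. A usage sketch (behaviour depends on whether bitsandbytes is installed in your environment):

```python
from peft.tuners import ia3

layer_cls = ia3.Linear            # eagerly imported, always available
try:
    quant_cls = ia3.Linear8bitLt  # goes through __getattr__; needs bitsandbytes
except AttributeError:
    print("bitsandbytes not available; 8-bit (IA)^3 layer cannot be used")
```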
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py ADDED
@@ -0,0 +1,129 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any
16
+
17
+ import torch
18
+
19
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
20
+
21
+ from .layer import IA3Layer
22
+
23
+
24
+ if is_bnb_available():
25
+
26
+ class Linear8bitLt(torch.nn.Module, IA3Layer):
27
+ # (IA)^3 implemented in a dense layer
28
+ def __init__(
29
+ self,
30
+ base_layer: torch.nn.Module,
31
+ adapter_name: str,
32
+ is_feedforward: bool,
33
+ init_ia3_weights: bool = True,
34
+ **kwargs,
35
+ ) -> None:
36
+ super().__init__()
37
+ IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
38
+
39
+ # Freezing the pre-trained weight matrix
40
+ self.get_base_layer().weight.requires_grad = False
41
+ self._active_adapter = adapter_name
42
+ self.update_layer(adapter_name, init_ia3_weights)
43
+
44
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
45
+ # note: no check for self.merged because merging is not supported (yet)
46
+ if self.disable_adapters:
47
+ return self.base_layer(x)
48
+
49
+ ia3_scaling = 1
50
+ for active_adapter in self.active_adapters:
51
+ if active_adapter not in self.ia3_l.keys():
52
+ continue
53
+ ia3_scaling *= self.ia3_l[active_adapter].flatten()
54
+
55
+ requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
56
+ if requires_conversion:
57
+ x = x.float()
58
+ if self.is_feedforward:
59
+ result = self.base_layer(x * ia3_scaling)
60
+ expected_dtype = result.dtype
61
+ else:
62
+ result = self.base_layer(x)
63
+ expected_dtype = result.dtype
64
+ result = result * ia3_scaling
65
+
66
+ if requires_conversion:
67
+ result = result.to(expected_dtype)
68
+
69
+ return result
70
+
71
+ def __repr__(self) -> str:
72
+ rep = super().__repr__()
73
+ return "ia3." + rep
74
+
75
+
76
+ if is_bnb_4bit_available():
77
+
78
+ class Linear4bit(torch.nn.Module, IA3Layer):
79
+ # IA3 implemented in a dense layer
80
+ def __init__(
81
+ self,
82
+ base_layer: torch.nn.Module,
83
+ adapter_name: str,
84
+ is_feedforward: bool,
85
+ init_ia3_weights: bool = True,
86
+ **kwargs,
87
+ ) -> None:
88
+ super().__init__()
89
+ IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
90
+
91
+ # Freezing the pre-trained weight matrix
92
+ self.get_base_layer().weight.requires_grad = False
93
+ self._active_adapter = adapter_name
94
+ self.update_layer(adapter_name, init_ia3_weights)
95
+
96
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
97
+ # note: no check for self.merged because merging is not supported (yet)
98
+ if self.disable_adapters:
99
+ return self.base_layer(x)
100
+
101
+ ia3_scaling = 1
102
+ for active_adapter in self.active_adapters:
103
+ if active_adapter not in self.ia3_l.keys():
104
+ continue
105
+ ia3_scaling *= self.ia3_l[active_adapter].flatten()
106
+
107
+ requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
108
+ if requires_conversion:
109
+ x = x.float()
110
+ if self.is_feedforward:
111
+ result = self.base_layer(x * ia3_scaling)
112
+ expected_dtype = result.dtype
113
+ else:
114
+ result = self.base_layer(x)
115
+ expected_dtype = result.dtype
116
+ result = result * ia3_scaling
117
+
118
+ result = result.clone()
119
+ # adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch.
120
+ # This has been duplicated here.
121
+
122
+ if requires_conversion:
123
+ result = result.to(expected_dtype)
124
+
125
+ return result
126
+
127
+ def __repr__(self) -> str:
128
+ rep = super().__repr__()
129
+ return "ia3." + rep
env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/layer.py ADDED
@@ -0,0 +1,307 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from typing import Any, List, Optional
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ from transformers.pytorch_utils import Conv1D
21
+
22
+ from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
23
+ from peft.utils import transpose
24
+
25
+
26
+ class IA3Layer(BaseTunerLayer):
27
+ # All names of layers that may contain adapter weights
28
+ adapter_layer_names = ("ia3_l",)
29
+
30
+ def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None:
31
+ self.base_layer = base_layer
32
+ self.ia3_l = nn.ParameterDict({})
33
+ # Mark the weight as unmerged
34
+ self._disable_adapters = False
35
+ self.merged_adapters = []
36
+ self.is_feedforward = is_feedforward
37
+
38
+ base_layer = self.get_base_layer()
39
+ if isinstance(base_layer, nn.Linear):
40
+ in_features, out_features = base_layer.in_features, base_layer.out_features
41
+ elif isinstance(base_layer, nn.Conv2d):
42
+ in_features, out_features = base_layer.in_channels, base_layer.out_channels
43
+ elif isinstance(base_layer, nn.Embedding):
44
+ in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim
45
+ elif isinstance(base_layer, Conv1D):
46
+ in_features, out_features = (
47
+ base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
48
+ )
49
+ else:
50
+ raise ValueError(f"Unsupported layer type {type(base_layer)}")
51
+ self.in_features = in_features
52
+ self.out_features = out_features
53
+
54
+ def update_layer(self, adapter_name, init_ia3_weights):
55
+ # This code works for linear layers, override for other layer types
56
+ # Actual trainable parameters
57
+ if self.is_feedforward:
58
+ weight = torch.randn((1, self.in_features))
59
+ else:
60
+ weight = torch.randn((self.out_features, 1))
61
+ self.ia3_l[adapter_name] = nn.Parameter(weight)
62
+ if init_ia3_weights:
63
+ self.reset_ia3_parameters(adapter_name)
64
+ self.to(self.get_base_layer().weight.device)
65
+ self.set_adapter(self.active_adapters)
66
+
67
+ def reset_ia3_parameters(self, adapter_name):
68
+ if adapter_name in self.ia3_l.keys():
69
+ # initialize learned vector with torch.ones
70
+ nn.init.constant_(self.ia3_l[adapter_name], 1.0)
71
+
72
+
73
+ class Linear(nn.Module, IA3Layer):
74
+ # (IA)^3 implemented in a dense layer
75
+ def __init__(
76
+ self,
77
+ base_layer: nn.Module,
78
+ adapter_name: str,
79
+ fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
80
+ is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer
81
+ is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. useful while unloading later
82
+ init_ia3_weights: bool = True, # whether to initialize IA3 weights
83
+ **kwargs,
84
+ ) -> None:
85
+ super().__init__()
86
+ IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
87
+ self.fan_in_fan_out = fan_in_fan_out
88
+ self.is_target_conv_1d_layer = is_target_conv_1d_layer
89
+ self._active_adapter = adapter_name
90
+ self.update_layer(adapter_name, init_ia3_weights)
91
+
92
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
93
+ """
94
+ Merge the active adapter weights into the base weights
95
+
96
+ Args:
97
+ safe_merge (`bool`, *optional*):
98
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
99
+ before merging the weights. This is useful if you want to check if the merge operation will produce
100
+ NaNs. Defaults to `False`.
101
+ adapter_names (`List[str]`, *optional*):
102
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
103
+ to `None`.
104
+ """
105
+ adapter_names = check_adapters_to_merge(self, adapter_names)
106
+ if not adapter_names:
107
+ # no adapter to merge
108
+ return
109
+
110
+ for active_adapter in adapter_names:
111
+ if active_adapter in self.ia3_l.keys():
112
+ base_layer = self.get_base_layer()
113
+ ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out)
114
+ if safe_merge:
115
+ orig_weights = base_layer.weight.data
116
+ orig_weights = torch.mul(orig_weights, ia3_l)
117
+
118
+ if not torch.isfinite(orig_weights).all():
119
+ raise ValueError(
120
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
121
+ )
122
+ base_layer.weight.data = orig_weights
123
+ else:
124
+ base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l)
125
+
126
+ if not self.is_feedforward and (base_layer.bias is not None):
127
+ scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
128
+ base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
129
+
130
+ self.merged_adapters.append(active_adapter)
131
+
132
+ def unmerge(self) -> None:
133
+ """
134
+ This method unmerges all merged adapter layers from the base weights.
135
+ """
136
+ if not self.merged:
137
+ warnings.warn("Already unmerged. Nothing to do.")
138
+ return
139
+
140
+ warnings.warn("Unmerge result can be inaccurate for (IA)^3.")
141
+ while len(self.merged_adapters) > 0:
142
+ active_adapter = self.merged_adapters.pop()
143
+ if active_adapter in self.ia3_l.keys():
144
+ base_layer = self.get_base_layer()
145
+ # Add tolerace to avoid division by zero
146
+ ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8
147
+ base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l)
148
+
149
+ if not self.is_feedforward and (base_layer.bias is not None):
150
+ scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
151
+ base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8)
152
+
153
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
154
+ dtype = previous_dtype = x.dtype
155
+
156
+ if self.disable_adapters:
157
+ if self.merged:
158
+ self.unmerge()
159
+ result = self.base_layer(x, *args, **kwargs)
160
+ elif self.merged:
161
+ result = self.base_layer(x, *args, **kwargs)
162
+ else:
163
+ ia3_scaling = 1
164
+ for active_adapter in self.active_adapters:
165
+ if active_adapter not in self.ia3_l.keys():
166
+ continue
167
+ dtype = self.ia3_l[active_adapter].dtype
168
+ ia3_scaling *= self.ia3_l[active_adapter].flatten()
169
+
170
+ if self.is_feedforward:
171
+ x = x.to(dtype)
172
+ # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype
173
+ # e.g. bf16 vs fp32. Is that okay?
174
+ interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype)
175
+ result = self.base_layer(interm, *args, **kwargs)
176
+ else:
177
+ result = self.base_layer(x, *args, **kwargs)
178
+ result = result.to(dtype) * ia3_scaling
179
+
180
+ result = result.to(previous_dtype)
181
+ return result
182
+
183
+
184
+ class Conv2d(nn.Module, IA3Layer):
185
+ def __init__(
186
+ self,
187
+ base_layer: nn.Module,
188
+ adapter_name: str,
189
+ fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
190
+ is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer
191
+ init_ia3_weights: bool = True,
192
+ **kwargs,
193
+ ) -> None:
194
+ super().__init__()
195
+ IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
196
+ self.fan_in_fan_out = fan_in_fan_out
197
+ self._active_adapter = adapter_name
198
+
199
+ self.update_layer(adapter_name, init_ia3_weights)
200
+
201
+ def update_layer(self, adapter_name, init_ia3_weights):
202
+ # Actual trainable parameters
203
+ if self.is_feedforward:
204
+ weight = torch.randn((1, self.in_features, 1, 1))
205
+ else:
206
+ weight = torch.randn((1, self.out_features, 1, 1))
207
+ self.ia3_l[adapter_name] = nn.Parameter(weight)
208
+ if init_ia3_weights:
209
+ self.reset_ia3_parameters(adapter_name)
210
+ self.to(self.get_base_layer().weight.device)
211
+ self.set_adapter(self.active_adapters)
212
+
213
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
214
+ """
215
+ Merge the active adapter weights into the base weights
216
+
217
+ Args:
218
+ safe_merge (`bool`, *optional*):
219
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
220
+ before merging the weights. This is useful if you want to check if the merge operation will produce
221
+ NaNs. Defaults to `False`.
222
+ adapter_names (`List[str]`, *optional*):
223
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
224
+ to `None`.
225
+ """
226
+ adapter_names = check_adapters_to_merge(self, adapter_names)
227
+ if not adapter_names:
228
+ # no adapter to merge
229
+ return
230
+
231
+ for active_adapter in adapter_names:
232
+ if active_adapter in self.ia3_l.keys():
233
+ base_layer = self.get_base_layer()
234
+ ia3_scaling = self.ia3_l[active_adapter].data
235
+ if not self.is_feedforward:
236
+ ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
237
+
238
+ if safe_merge:
239
+ output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone()
240
+
241
+ if not torch.isfinite(output_weight).all():
242
+ raise ValueError(
243
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
244
+ )
245
+
246
+ base_layer.weight.data = output_weight
247
+ else:
248
+ base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling)
249
+
250
+ if not self.is_feedforward and (base_layer.bias is not None):
251
+ scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
252
+ base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
253
+
254
+ self.merged_adapters.append(active_adapter)
255
+
256
+ def unmerge(self) -> None:
257
+ """
258
+ This method unmerges all merged adapter layers from the base weights.
259
+ """
260
+ if not self.merged:
261
+ warnings.warn("Already unmerged. Nothing to do.")
262
+ return
263
+
264
+ warnings.warn("Unmerge result can be inaccurate for (IA)^3.")
265
+ while len(self.merged_adapters) > 0:
266
+ active_adapter = self.merged_adapters.pop()
267
+ if active_adapter in self.ia3_l.keys():
268
+ base_layer = self.get_base_layer()
269
+ # divide by (IA)^3 vector. Add tolerace to avoid division by zero
270
+ ia3_scaling = self.ia3_l[active_adapter].data
271
+ if not self.is_feedforward:
272
+ ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
273
+ base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8)
274
+
275
+ if not self.is_feedforward and (base_layer.bias is not None):
276
+ scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
277
+ base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
278
+
279
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
280
+ dtype = previous_dtype = x.dtype
281
+
282
+ if self.disable_adapters:
283
+ if self.merged:
284
+ self.unmerge()
285
+ result = self.base_layer(x, *args, **kwargs)
286
+ elif self.merged:
287
+ result = self.base_layer(x, *args, **kwargs)
288
+ else:
289
+ ia3_scaling = 1
290
+ for active_adapter in self.active_adapters:
291
+ if active_adapter not in self.ia3_l.keys():
292
+ continue
293
+ dtype = self.ia3_l[active_adapter].dtype
294
+ ia3_scaling *= self.ia3_l[active_adapter]
295
+
296
+ if self.is_feedforward:
297
+ x = x.to(dtype)
298
+ # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype
299
+ # e.g. bf16 vs fp32. Is that okay?
300
+ interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype)
301
+ result = self.base_layer(interm, *args, **kwargs)
302
+ else:
303
+ result = self.base_layer(x, *args, **kwargs)
304
+ result = result.to(dtype) * ia3_scaling
305
+
306
+ result = result.to(previous_dtype)
307
+ return result
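Merging the (IA)^3 Linear layer above multiplies the base weight element-wise by the (broadcast) learned vector, and unmerging divides by the vector plus a 1e-8 tolerance, which is why the code warns that unmerging can be inaccurate. A standalone sketch of that arithmetic with random stand-in weights:

```python
import torch

weight = torch.randn(32, 16)            # base_layer.weight, shape (out, in)
ia3_l = torch.rand(32, 1) + 0.5         # non-feedforward scaling vector

merged = weight * ia3_l                 # merge: rescale each output row
restored = merged / (ia3_l + 1e-8)      # unmerge: divide with a small tolerance

# the tolerance avoids division by zero but makes the round trip approximate
print(torch.max(torch.abs(restored - weight)).item())
```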
env-llmeval/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py ADDED
@@ -0,0 +1,428 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import annotations
15
+
16
+ import warnings
17
+ from abc import abstractmethod
18
+ from dataclasses import dataclass, field
19
+ from typing import Any, Optional, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+ from tqdm import tqdm
24
+
25
+ from peft.config import PeftConfig
26
+ from peft.utils import (
27
+ ModulesToSaveWrapper,
28
+ _get_submodules,
29
+ )
30
+
31
+ from .tuners_utils import BaseTuner, BaseTunerLayer, check_adapters_to_merge, check_target_module_exists
32
+
33
+
34
+ @dataclass
35
+ class LycorisConfig(PeftConfig):
36
+ r"""
37
+ A base config for LyCORIS like adapters
38
+ """
39
+
40
+ rank_pattern: Optional[dict] = field(
41
+ default_factory=dict,
42
+ metadata={
43
+ "help": (
44
+ "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. "
45
+ "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}"
46
+ )
47
+ },
48
+ )
49
+ alpha_pattern: Optional[dict] = field(
50
+ default_factory=dict,
51
+ metadata={
52
+ "help": (
53
+ "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. "
54
+ "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}"
55
+ )
56
+ },
57
+ )
58
+
59
+
60
+ class LycorisLayer(BaseTunerLayer):
61
+ r"""
62
+ A base layer for LyCORIS like adapters
63
+ """
64
+
65
+ # adapter_layer_names needs to be defined on the child class
66
+ other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout")
67
+
68
+ def __init__(self, base_layer: nn.Module) -> None:
69
+ self.base_layer = base_layer
70
+ self.r = {}
71
+ self.alpha = {}
72
+ self.scaling = {}
73
+ self.rank_dropout = {}
74
+ self.module_dropout = {}
75
+
76
+ # Tuner info
77
+ self._disable_adapters = False
78
+ self.merged_adapters = []
79
+
80
+ @property
81
+ @abstractmethod
82
+ def _available_adapters(self) -> set[str]:
83
+ ...
84
+
85
+ def _init_empty_weights(self, cls, *args, **kwargs) -> None:
86
+ # A helper method that allows to initialize the layer of the given class without spending time to initialize the
87
+ # model weights. The implementation is inspired by
88
+ # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but this function cannot be used
89
+ # directly.
90
+ # Instead of this approach, it would be possible to bypass the __init__ of the class but that runs the risk of
91
+ # omitting important logic inside that __init__.
92
+ kwargs = kwargs.copy()
93
+ final_device = kwargs.pop("device", "cpu")
94
+ cls.__init__(self, *args, device="meta", **kwargs)
95
+ self.to_empty(device=final_device)
96
+
97
+ @abstractmethod
98
+ def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs):
99
+ ...
100
+
101
+ # TODO: refactor LoRA to use the same approach
102
+ @abstractmethod
103
+ def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
104
+ """Activations added on top of the base layer output (i.e. after the base layer forward pass)"""
105
+
106
+ @abstractmethod
107
+ def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
108
+ ...
109
+
110
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
111
+ """
112
+ Merge the active adapter weights into the base weights
113
+
114
+ Args:
115
+ safe_merge (`bool`, *optional*):
116
+ If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs
117
+ before merging the weights. This is useful if you want to check if the merge operation will produce
118
+ NaNs. Defaults to `False`.
119
+ adapter_names (`List[str]`, *optional*):
120
+ The list of adapter names that should be merged. If `None`, all active adapters will be merged.
121
+ Defaults to `None`.
122
+ """
123
+ adapter_names = check_adapters_to_merge(self, adapter_names)
124
+ if not adapter_names:
125
+ # no adapter to merge
126
+ return
127
+
128
+ for active_adapter in adapter_names:
129
+ if active_adapter in self._available_adapters:
130
+ base_layer = self.get_base_layer()
131
+ if safe_merge:
132
+ orig_weights = base_layer.weight.data.clone()
133
+ orig_weights += self.get_delta_weight(active_adapter)
134
+
135
+ if not torch.isfinite(orig_weights).all():
136
+ raise ValueError(
137
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
138
+ )
139
+
140
+ base_layer.weight.data = orig_weights
141
+ else:
142
+ base_layer.weight.data += self.get_delta_weight(active_adapter)
143
+ self.merged_adapters.append(active_adapter)
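A minimal illustration of the safe-merge path above on plain tensors, assuming only `torch`; `delta` stands in for `get_delta_weight(...)`:

```py
import torch

base_weight = torch.randn(4, 4)
delta = 0.01 * torch.randn(4, 4)          # stands in for get_delta_weight(adapter)

candidate = base_weight.clone() + delta   # merge into a copy first
if not torch.isfinite(candidate).all():   # would catch an adapter producing NaN/inf
    raise ValueError("NaNs detected in the merged weights")
base_weight = candidate                   # commit only after the check passes
```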
144
+
145
+ @abstractmethod
146
+ def reset_adapter_parameters(self, adapter_name: str):
147
+ ...
148
+
149
+ def set_scale(self, adapter, scale):
150
+ if adapter not in self._available_adapters:
151
+ # Ignore the case where the adapter is not in the layer
152
+ return
153
+ self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter]
154
+
155
+ def scale_layer(self, scale: float) -> None:
156
+ if scale == 1:
157
+ return
158
+
159
+ for active_adapter in self.active_adapters:
160
+ if active_adapter not in self._available_adapters:
161
+ continue
162
+
163
+ self.scaling[active_adapter] *= scale
164
+
165
+ def unmerge(self) -> None:
166
+ """
167
+ This method unmerges all merged adapter layers from the base weights.
168
+ """
169
+ if not self.merged:
170
+ warnings.warn("Already unmerged. Nothing to do.")
171
+ return
172
+ while len(self.merged_adapters) > 0:
173
+ active_adapter = self.merged_adapters.pop()
174
+ if active_adapter in self._available_adapters:
175
+ self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
176
+
177
+ def unscale_layer(self, scale=None) -> None:
178
+ for active_adapter in self.active_adapters:
179
+ if active_adapter not in self._available_adapters:
180
+ continue
181
+
182
+ if scale is None:
183
+ self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter]
184
+ else:
185
+ self.scaling[active_adapter] /= scale
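A worked numeric example of the scaling bookkeeping above; the values are illustrative:

```py
alpha, r = 8.0, 4
scaling = alpha / r        # base scaling set by update_layer: 2.0

scaling *= 0.5             # scale_layer(0.5)             -> 1.0
scaling /= 0.5             # unscale_layer(0.5)           -> 2.0
scaling = alpha / r        # unscale_layer(None) resets to alpha / r
scaling = 0.5 * alpha / r  # set_scale(adapter, 0.5) recomputes from scratch -> 1.0
print(scaling)             # 1.0
```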
186
+
187
+ @abstractmethod
188
+ def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs):
189
+ ...
190
+
191
+
192
+ class LycorisTuner(BaseTuner):
193
+ r"""
194
+ A base tuner for LyCORIS like adapters
195
+ """
196
+
197
+ prefix: str
198
+ layers_mapping: dict[type[torch.nn.Module], type[LycorisLayer]]
199
+
200
+ def __init__(self, model, config, adapter_name):
201
+ super().__init__(model, config, adapter_name)
202
+
203
+ def __getattr__(self, name: str):
204
+ """Forward missing attributes to the wrapped module."""
205
+ try:
206
+ return super().__getattr__(name) # defer to nn.Module's logic
207
+ except AttributeError:
208
+ return getattr(self.model, name)
209
+
210
+ @staticmethod
211
+ def _check_target_module_exists(config, key):
212
+ return check_target_module_exists(config, key)
213
+
214
+ @abstractmethod
215
+ def _create_and_replace(
216
+ self,
217
+ config: LycorisConfig,
218
+ adapter_name: str,
219
+ target: Union[LycorisLayer, nn.Module],
220
+ target_name,
221
+ parent,
222
+ current_key,
223
+ ):
224
+ ...
225
+
226
+ @classmethod
227
+ def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer:
228
+ # Find corresponding subtype of provided target module
229
+ new_module_cls = None
230
+ for subtype, target_cls in cls.layers_mapping.items():
231
+ if (
232
+ hasattr(target, "base_layer")
233
+ and isinstance(target.get_base_layer(), subtype)
234
+ and isinstance(target, BaseTunerLayer)
235
+ ):
236
+ # nested tuner layers are allowed
237
+ new_module_cls = target_cls
238
+ break
239
+ elif isinstance(target, subtype):
240
+ new_module_cls = target_cls
241
+ break
242
+
243
+ # We didn't find a corresponding type, so the adapter for this layer is not supported
244
+ if new_module_cls is None:
245
+ supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys())
246
+ raise ValueError(
247
+ f"Target module of type {type(target)} not supported, "
248
+ f"currently only adapters for {supported_modules} are supported"
249
+ )
250
+
251
+ if isinstance(target, BaseTunerLayer):
252
+ target_base_layer = target.get_base_layer()
253
+ else:
254
+ target_base_layer = target
255
+
256
+ if isinstance(target_base_layer, torch.nn.Conv2d):
257
+ new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
258
+ elif isinstance(target_base_layer, torch.nn.Linear):
259
+ new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
260
+ else:
261
+ supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys())
262
+ raise ValueError(
263
+ f"Target module of type {type(target)} not supported, "
264
+ f"currently only adapters for {supported_modules} are supported"
265
+ )
266
+
267
+ return new_module
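A hedged sketch of how a concrete subclass might populate `layers_mapping` so that `_create_new_module` can dispatch on the base layer type; the adapter classes here are hypothetical placeholders for the layer implementations a LyCORIS variant (e.g. LoHa or LoKr) would provide:

```py
import torch.nn as nn

class MyLinearAdapter: ...   # placeholder for a LycorisLayer wrapping nn.Linear
class MyConv2dAdapter: ...   # placeholder for a LycorisLayer wrapping nn.Conv2d

class MyTuner:
    # The first entry whose key matches the (base) layer type wins, so nested
    # tuner layers are resolved through their base layer.
    layers_mapping = {
        nn.Linear: MyLinearAdapter,
        nn.Conv2d: MyConv2dAdapter,
    }
```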
268
+
269
+ def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
270
+ for n, p in model.named_parameters():
271
+ if self.prefix not in n:
272
+ p.requires_grad = False
273
+
274
+ @staticmethod
275
+ def _prepare_adapter_config(peft_config, model_config):
276
+ if peft_config.target_modules is None:
277
+ raise ValueError("Please specify `target_modules` in `peft_config`")
278
+ return peft_config
279
+
280
+ def _replace_module(self, parent, child_name, new_module, child):
281
+ setattr(parent, child_name, new_module)
282
+ # It's not necessary to set requires_grad here, as that is handled by
283
+ # _mark_only_adapters_as_trainable
284
+
285
+ if not hasattr(new_module, "base_layer"):
286
+ new_module.weight = child.weight
287
+ if hasattr(child, "bias"):
288
+ new_module.bias = child.bias
289
+
290
+ if getattr(child, "state", None) is not None:
291
+ if hasattr(new_module, "base_layer"):
292
+ new_module.base_layer.state = child.state
293
+ else:
294
+ new_module.state = child.state
295
+ new_module.to(child.weight.device)
296
+
297
+ # dispatch to correct device
298
+ for name, module in new_module.named_modules():
299
+ if self.prefix in name:
300
+ module.to(child.weight.device)
301
+
302
+ def _set_adapter_layers(self, enabled=True):
303
+ for module in self.model.modules():
304
+ if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
305
+ module.enable_adapters(enabled)
306
+
307
+ def _unload_and_optionally_merge(
308
+ self,
309
+ merge: bool = True,
310
+ progressbar: bool = False,
311
+ safe_merge: bool = False,
312
+ adapter_names: Optional[list[str]] = None,
313
+ ):
314
+ if merge:
315
+ if getattr(self.model, "quantization_method", None) == "gptq":
316
+ raise ValueError("Cannot merge LOHA layers when the model is gptq quantized")
317
+
318
+ self._unloading_checks(adapter_names)
319
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
320
+ desc = "Unloading " + ("and merging " if merge else "") + "model"
321
+ for key in tqdm(key_list, disable=not progressbar, desc=desc):
322
+ try:
323
+ parent, target, target_name = _get_submodules(self.model, key)
324
+ except AttributeError:
325
+ continue
326
+
327
+ if hasattr(target, "base_layer"):
328
+ if merge:
329
+ target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
330
+ self._replace_module(parent, target_name, target.get_base_layer(), target)
331
+ elif isinstance(target, ModulesToSaveWrapper):
332
+ # save any additional trainable modules that are part of `modules_to_save`
333
+ new_module = target.modules_to_save[target.active_adapter]
334
+ if hasattr(new_module, "base_layer"):
335
+ # check if the module is itself a tuner layer
336
+ if merge:
337
+ new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
338
+ new_module = new_module.get_base_layer()
339
+ setattr(parent, target_name, new_module)
340
+
341
+ return self.model
342
+
343
+ def enable_adapter_layers(self) -> None:
344
+ """Enable all adapters.
345
+
346
+ Call this if you have previously disabled all adapters and want to re-enable them.
347
+ """
348
+ self._set_adapter_layers(enabled=True)
349
+
350
+ def disable_adapter_layers(self) -> None:
351
+ """Disable all adapters.
352
+
353
+ When disabling all adapters, the model output corresponds to the output of the base model.
354
+ """
355
+ self._set_adapter_layers(enabled=False)
356
+
357
+ def merge_and_unload(
358
+ self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
359
+ ) -> torch.nn.Module:
360
+ r"""
361
+ This method merges the adapter layers into the base model. This is needed if someone wants to use the base
362
+ model as a standalone model.
363
+
364
+ Args:
365
+ progressbar (`bool`):
366
+ whether to show a progressbar indicating the unload and merge process
367
+ safe_merge (`bool`):
368
+ whether to activate the safe merging check to check if there is any potential Nan in the adapter
369
+ weights
370
+ adapter_names (`List[str]`, *optional*):
371
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
372
+ to `None`.
373
+
374
+ """
375
+ return self._unload_and_optionally_merge(
376
+ progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
377
+ )
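A hedged usage sketch, assuming a small decoder-only model whose attention projections are plain `nn.Linear` layers; the model name, target modules, and training loop are illustrative:

```py
from peft import LoHaConfig, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = LoHaConfig(r=8, alpha=8, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base, config)

# ... train the adapter here ...

# Fold the adapter into the base weights and drop the tuner wrappers.
merged = model.merge_and_unload(progressbar=True, safe_merge=True)

# Alternatively, discard the adapter without merging to recover the base model:
# restored = model.unload()
```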
378
+
379
+ def unload(self) -> torch.nn.Module:
380
+ """
381
+ Gets back the base model by removing all the adapter modules without merging. This gives back the original base
382
+ model.
383
+ """
384
+ return self._unload_and_optionally_merge(merge=False)
385
+
386
+ def set_adapter(self, adapter_name: str | list[str]) -> None:
387
+ """Set the active adapter(s).
388
+
389
+ Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
390
+ not desired, use the following code.
391
+
392
+ ```py
393
+ >>> for name, param in model_peft.named_parameters():
394
+ ... if ...: # some check on name (ex. if 'lora' in name)
395
+ ... param.requires_grad = False
396
+ ```
397
+
398
+ Args:
399
+ adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
400
+ """
401
+ for module in self.model.modules():
402
+ if isinstance(module, LycorisLayer):
403
+ if module.merged:
404
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
405
+ module.unmerge()
406
+ module.set_adapter(adapter_name)
407
+
408
+ def delete_adapter(self, adapter_name: str) -> None:
409
+ """
410
+ Deletes an existing adapter.
411
+
412
+ Args:
413
+ adapter_name (`str`): Name of the adapter to be deleted.
414
+ """
415
+ if adapter_name not in list(self.peft_config.keys()):
416
+ raise ValueError(f"Adapter {adapter_name} does not exist")
417
+ del self.peft_config[adapter_name]
418
+
419
+ key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
420
+ new_adapter = None
421
+ for key in key_list:
422
+ _, target, _ = _get_submodules(self.model, key)
423
+ if isinstance(target, LycorisLayer):
424
+ target.delete_adapter(adapter_name)
425
+ if new_adapter is None:
426
+ new_adapter = target.active_adapters[:]
427
+
428
+ self.active_adapter = new_adapter or []
env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
16
+ from .model import MultitaskPromptEmbedding
17
+
18
+
19
+ __all__ = ["MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "MultitaskPromptEmbedding"]
env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (384 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.83 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py ADDED
@@ -0,0 +1,61 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import enum
16
+ from dataclasses import dataclass, field
17
+ from typing import Optional, Union
18
+
19
+ from peft.tuners.prompt_tuning import PromptTuningConfig
20
+ from peft.utils import PeftType
21
+
22
+
23
+ class MultitaskPromptTuningInit(str, enum.Enum):
24
+ # initialize prompt with text
25
+ TEXT = "TEXT"
26
+ # initialize prompt with random matrix
27
+ RANDOM = "RANDOM"
28
+ # average the prefix and column matrices obtained during source training
29
+ AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
30
+ # pick prefix and column matrices for a particular task obtained during source training
31
+ EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
32
+ # only use the prompt embeddings trained during source training
33
+ ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"
34
+
35
+
36
+ @dataclass
37
+ class MultitaskPromptTuningConfig(PromptTuningConfig):
38
+ prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
39
+ default=MultitaskPromptTuningInit.RANDOM,
40
+ metadata={
41
+ "help": (
42
+ "How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
43
+ "EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
44
+ ),
45
+ },
46
+ )
47
+ prompt_tuning_init_state_dict_path: Optional[str] = field(
48
+ default=None,
49
+ metadata={
50
+ "help": (
51
+ "The path of source state dict. This is required when training the downstream target prompt from "
52
+ "the pretrained source prompt"
53
+ ),
54
+ },
55
+ )
56
+ prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
57
+ num_ranks: Optional[int] = field(default=1, metadata={"help": "number of ranks for the low-rank task prompt matrices"})
58
+ num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})
59
+
60
+ def __post_init__(self):
61
+ self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
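A hedged configuration sketch for initializing a target-task prompt from a saved source prompt; the path, task count, and sizes are illustrative:

```py
from peft import MultitaskPromptTuningConfig, MultitaskPromptTuningInit, TaskType

config = MultitaskPromptTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    num_virtual_tokens=50,
    num_tasks=8,                 # number of source tasks
    num_ranks=1,                 # rank of the per-task low-rank matrices
    prompt_tuning_init=MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
    prompt_tuning_init_task=3,   # which source task's matrices to copy
    prompt_tuning_init_state_dict_path="source_prompt/state_dict.bin",
)
```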
env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py ADDED
@@ -0,0 +1,115 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+
17
+ from peft.tuners.prompt_tuning import PromptEmbedding
18
+ from peft.utils import TaskType
19
+
20
+ from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
21
+
22
+
23
+ # This code is adapted from the paper https://arxiv.org/abs/2303.02861 and
24
+ # is based on work done at the MIT-IBM Watson Research Lab.
25
+
26
+
27
+ class MultitaskPromptEmbedding(PromptEmbedding):
28
+ def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings):
29
+ super().__init__(config, word_embeddings)
30
+
31
+ self.num_tasks = config.num_tasks
32
+ self.num_ranks = config.num_ranks
33
+ self.num_virtual_tokens = config.num_virtual_tokens
34
+
35
+ self.num_transformer_submodules = config.num_transformer_submodules
36
+ if self.num_transformer_submodules is None:
37
+ self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
38
+
39
+ self.token_dim = config.token_dim
40
+
41
+ total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules
42
+
43
+ self.prefix_task_cols = torch.nn.Parameter(
44
+ torch.normal(
45
+ mean=0,
46
+ std=0.02,
47
+ size=(self.num_tasks, total_virtual_tokens, self.num_ranks),
48
+ )
49
+ )
50
+ self.prefix_task_rows = torch.nn.Parameter(
51
+ torch.normal(
52
+ mean=0,
53
+ std=0.02,
54
+ size=(self.num_tasks, self.num_ranks, self.token_dim),
55
+ )
56
+ )
57
+
58
+ if config.prompt_tuning_init in [
59
+ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
60
+ MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
61
+ MultitaskPromptTuningInit.ONLY_SOURCE_SHARED,
62
+ ]:
63
+ if config.prompt_tuning_init_state_dict_path is None:
64
+ raise ValueError(
65
+ f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} "
66
+ "init method"
67
+ )
68
+
69
+ # TODO: There should be an option for safetensors
70
+ state_dict: dict = torch.load(
71
+ config.prompt_tuning_init_state_dict_path,
72
+ map_location=word_embeddings.weight.device,
73
+ )
74
+
75
+ if config.prompt_tuning_init in [
76
+ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
77
+ MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
78
+ ]:
79
+ prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"]
80
+ prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"]
81
+
82
+ if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS:
83
+ prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True)
84
+ prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True)
85
+ elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK:
86
+ prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0)
87
+ prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0)
88
+
89
+ state_dict = {
90
+ "embedding.weight": state_dict["prompt_embeddings"],
91
+ "prefix_task_cols": prefix_task_cols_,
92
+ "prefix_task_rows": prefix_task_rows_,
93
+ }
94
+
95
+ self.load_state_dict(state_dict, strict=True)
96
+ elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED:
97
+ state_dict = {
98
+ "embedding.weight": state_dict["prompt_embeddings"],
99
+ }
100
+
101
+ self.load_state_dict(state_dict, strict=False)
102
+
103
+ def forward(self, indices, task_ids):
104
+ if task_ids is None:
105
+ raise ValueError("task_ids cannot be None")
106
+
107
+ prompt_embeddings = self.embedding(indices)
108
+
109
+ task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids)
110
+ task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids)
111
+ task_prompts = torch.matmul(task_cols, task_rows)
112
+
113
+ prompt_embeddings *= task_prompts
114
+
115
+ return prompt_embeddings
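A shape walkthrough of the forward pass above on plain tensors, with illustrative sizes (4 tasks, rank 1, 10 virtual tokens, token dim 16, batch of 2):

```py
import torch

num_tasks, num_ranks, total_virtual_tokens, token_dim = 4, 1, 10, 16
prefix_task_cols = torch.randn(num_tasks, total_virtual_tokens, num_ranks)
prefix_task_rows = torch.randn(num_tasks, num_ranks, token_dim)
prompt_embeddings = torch.randn(2, total_virtual_tokens, token_dim)  # shared prompt, batch of 2

task_ids = torch.tensor([0, 3])                                # one task id per example
task_cols = torch.index_select(prefix_task_cols, 0, task_ids)  # (2, 10, 1)
task_rows = torch.index_select(prefix_task_rows, 0, task_ids)  # (2, 1, 16)
task_prompts = torch.matmul(task_cols, task_rows)              # (2, 10, 16), rank-1 per example
out = prompt_embeddings * task_prompts                         # element-wise task modulation
print(out.shape)                                               # torch.Size([2, 10, 16])
```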
env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc ADDED
Binary file (5.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .config import PolyConfig
16
+ from .layer import Linear, PolyLayer
17
+ from .model import PolyModel
18
+
19
+
20
+ __all__ = ["Linear", "PolyConfig", "PolyLayer", "PolyModel"]
env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (358 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc ADDED
Binary file (3.25 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc ADDED
Binary file (4.09 kB). View file
 
env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc ADDED
Binary file (6.24 kB). View file