applied-ai-018 committed on
Commit 1d0bd1d · verified · 1 Parent(s): 28c740f

Add files using upload-large-folder tool
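
The `upload-large-folder` tool referenced in the commit message is the resumable large-upload entry point in `huggingface_hub`. A minimal sketch of how a commit like this could be produced (repo id and local path are hypothetical; assumes `huggingface_hub` v0.24+, where `upload_large_folder` is available):

```python
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default

# Resumable, multi-worker upload designed for folders too large for a
# single commit; repo_type is a required argument for this method.
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",     # hypothetical repo id
    repo_type="model",
    folder_path="/path/to/local/checkpoints",  # hypothetical local path
)
```

Large binary files such as the `.pt` checkpoints below are stored through Git LFS, which is why the diff shows short pointer files rather than tensor data.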

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/11.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/16.attention.dense.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step40/zero/16.attention.dense.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step40/zero/24.post_attention_layernorm.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/INSTALLER +1 -0
  7. venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/License.txt +1568 -0
  8. venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/METADATA +35 -0
  9. venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/RECORD +23 -0
  10. venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/WHEEL +5 -0
  11. venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/top_level.txt +1 -0
  12. venv/lib/python3.10/site-packages/peft/__init__.py +90 -0
  13. venv/lib/python3.10/site-packages/peft/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/peft/__pycache__/auto.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/peft/__pycache__/config.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/peft/__pycache__/helpers.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/peft/__pycache__/import_utils.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/peft/__pycache__/mapping.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/peft/__pycache__/mixed_model.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/peft/__pycache__/peft_model.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/peft/auto.py +170 -0
  22. venv/lib/python3.10/site-packages/peft/config.py +270 -0
  23. venv/lib/python3.10/site-packages/peft/helpers.py +113 -0
  24. venv/lib/python3.10/site-packages/peft/import_utils.py +73 -0
  25. venv/lib/python3.10/site-packages/peft/mapping.py +168 -0
  26. venv/lib/python3.10/site-packages/peft/mixed_model.py +409 -0
  27. venv/lib/python3.10/site-packages/peft/peft_model.py +1986 -0
  28. venv/lib/python3.10/site-packages/peft/py.typed +0 -0
  29. venv/lib/python3.10/site-packages/peft/tuners/__init__.py +32 -0
  30. venv/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py +36 -0
  31. venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/model.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py +129 -0
  37. venv/lib/python3.10/site-packages/peft/tuners/ia3/config.py +98 -0
  38. venv/lib/python3.10/site-packages/peft/tuners/ia3/layer.py +307 -0
  39. venv/lib/python3.10/site-packages/peft/tuners/ia3/model.py +394 -0
  40. venv/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py +428 -0
  41. venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py +19 -0
  42. venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py +61 -0
  46. venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py +115 -0
  47. venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py +19 -0
  48. venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/11.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c877cdb9087bc67a99b5da3c6b738f42f03d85e824cd43670cfc1e6f955a0fe3
+ size 33555612
ckpts/universal/global_step40/zero/11.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:573bd35086b97daac26d6563ac76202a66363e1207a7df76368d8f09f9a4ac77
+ size 33555533
ckpts/universal/global_step40/zero/16.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:127098c5275b2c6c8c594933ae9a56abdf1f8bc6818f16fc0cbc19ff4df8af1f
+ size 16778396
ckpts/universal/global_step40/zero/16.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e64244bd627bd34e94c22be4e24cd90d14abc9460abb9d9df978e700030b02b
+ size 16778411
ckpts/universal/global_step40/zero/24.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3887231cf4a4dd11dd835a678e162b9c1a3518b0eb583264d666d97398b93648
+ size 9293
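
Each of the `.pt` entries above is a Git LFS pointer, not the tensor data itself: the pointer records the LFS spec version, the SHA-256 digest of the real blob (`oid`), and its size in bytes. A minimal sketch of parsing such a pointer and verifying a downloaded blob against it (file paths hypothetical):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    # e.g. {"version": "https://git-lfs.github.com/spec/v1",
    #       "oid": "sha256:c877cdb9...", "size": "33555612"}
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob's size and SHA-256 against its LFS pointer."""
    fields = parse_lfs_pointer(pointer_path)
    data = Path(blob_path).read_bytes()
    if len(data) != int(fields["size"]):
        return False
    return hashlib.sha256(data).hexdigest() == fields["oid"].removeprefix("sha256:")

# Usage (hypothetical paths):
# verify_blob("pointers/exp_avg.pt", "downloads/exp_avg.pt")
```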
venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
1
+ End User License Agreement
2
+ --------------------------
3
+
4
+
5
+ Preface
6
+ -------
7
+
8
+ The Software License Agreement in Chapter 1 and the Supplement
9
+ in Chapter 2 contain license terms and conditions that govern
10
+ the use of NVIDIA software. By accepting this agreement, you
11
+ agree to comply with all the terms and conditions applicable
12
+ to the product(s) included herein.
13
+
14
+
15
+ NVIDIA Driver
16
+
17
+
18
+ Description
19
+
20
+ This package contains the operating system driver and
21
+ fundamental system software components for NVIDIA GPUs.
22
+
23
+
24
+ NVIDIA CUDA Toolkit
25
+
26
+
27
+ Description
28
+
29
+ The NVIDIA CUDA Toolkit provides command-line and graphical
30
+ tools for building, debugging and optimizing the performance
31
+ of applications accelerated by NVIDIA GPUs, runtime and math
32
+ libraries, and documentation including programming guides,
33
+ user manuals, and API references.
34
+
35
+
36
+ Default Install Location of CUDA Toolkit
37
+
38
+ Windows platform:
39
+
40
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
41
+
42
+ Linux platform:
43
+
44
+ /usr/local/cuda-#.#
45
+
46
+ Mac platform:
47
+
48
+ /Developer/NVIDIA/CUDA-#.#
49
+
50
+
51
+ NVIDIA CUDA Samples
52
+
53
+
54
+ Description
55
+
56
+ This package includes over 100+ CUDA examples that demonstrate
57
+ various CUDA programming principles, and efficient CUDA
58
+ implementation of algorithms in specific application domains.
59
+
60
+
61
+ Default Install Location of CUDA Samples
62
+
63
+ Windows platform:
64
+
65
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
66
+
67
+ Linux platform:
68
+
69
+ /usr/local/cuda-#.#/samples
70
+
71
+ and
72
+
73
+ $HOME/NVIDIA_CUDA-#.#_Samples
74
+
75
+ Mac platform:
76
+
77
+ /Developer/NVIDIA/CUDA-#.#/samples
78
+
79
+
80
+ NVIDIA Nsight Visual Studio Edition (Windows only)
81
+
82
+
83
+ Description
84
+
85
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
86
+ development environment integrated into Microsoft Visual
87
+ Studio that provides tools for debugging, profiling, analyzing
88
+ and optimizing your GPU computing and graphics applications.
89
+
90
+
91
+ Default Install Location of Nsight Visual Studio Edition
92
+
93
+ Windows platform:
94
+
95
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
96
+
97
+
98
+ 1. License Agreement for NVIDIA Software Development Kits
99
+ ---------------------------------------------------------
100
+
101
+
102
+ Release Date: July 26, 2018
103
+ ---------------------------
104
+
105
+
106
+ Important NoticeRead before downloading, installing,
107
+ copying or using the licensed software:
108
+ -------------------------------------------------------
109
+
110
+ This license agreement, including exhibits attached
111
+ ("Agreement”) is a legal agreement between you and NVIDIA
112
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
113
+ software development kit (“SDK”).
114
+
115
+ Each SDK has its own set of software and materials, but here
116
+ is a description of the types of items that may be included in
117
+ a SDK: source code, header files, APIs, data sets and assets
118
+ (examples include images, textures, models, scenes, videos,
119
+ native API input/output files), binary software, sample code,
120
+ libraries, utility programs, programming code and
121
+ documentation.
122
+
123
+ This Agreement can be accepted only by an adult of legal age
124
+ of majority in the country in which the SDK is used.
125
+
126
+ If you are entering into this Agreement on behalf of a company
127
+ or other legal entity, you represent that you have the legal
128
+ authority to bind the entity to this Agreement, in which case
129
+ “you” will mean the entity you represent.
130
+
131
+ If you don’t have the required age or authority to accept
132
+ this Agreement, or if you don’t accept all the terms and
133
+ conditions of this Agreement, do not download, install or use
134
+ the SDK.
135
+
136
+ You agree to use the SDK only for purposes that are permitted
137
+ by (a) this Agreement, and (b) any applicable law, regulation
138
+ or generally accepted practices or guidelines in the relevant
139
+ jurisdictions.
140
+
141
+
142
+ 1.1. License
143
+
144
+
145
+ 1.1.1. License Grant
146
+
147
+ Subject to the terms of this Agreement, NVIDIA hereby grants
148
+ you a non-exclusive, non-transferable license, without the
149
+ right to sublicense (except as expressly provided in this
150
+ Agreement) to:
151
+
152
+ 1. Install and use the SDK,
153
+
154
+ 2. Modify and create derivative works of sample source code
155
+ delivered in the SDK, and
156
+
157
+ 3. Distribute those portions of the SDK that are identified
158
+ in this Agreement as distributable, as incorporated in
159
+ object code format into a software application that meets
160
+ the distribution requirements indicated in this Agreement.
161
+
162
+
163
+ 1.1.2. Distribution Requirements
164
+
165
+ These are the distribution requirements for you to exercise
166
+ the distribution grant:
167
+
168
+ 1. Your application must have material additional
169
+ functionality, beyond the included portions of the SDK.
170
+
171
+ 2. The distributable portions of the SDK shall only be
172
+ accessed by your application.
173
+
174
+ 3. The following notice shall be included in modifications
175
+ and derivative works of sample source code distributed:
176
+ “This software contains source code provided by NVIDIA
177
+ Corporation.”
178
+
179
+ 4. Unless a developer tool is identified in this Agreement
180
+ as distributable, it is delivered for your internal use
181
+ only.
182
+
183
+ 5. The terms under which you distribute your application
184
+ must be consistent with the terms of this Agreement,
185
+ including (without limitation) terms relating to the
186
+ license grant and license restrictions and protection of
187
+ NVIDIA’s intellectual property rights. Additionally, you
188
+ agree that you will protect the privacy, security and
189
+ legal rights of your application users.
190
+
191
+ 6. You agree to notify NVIDIA in writing of any known or
192
+ suspected distribution or use of the SDK not in compliance
193
+ with the requirements of this Agreement, and to enforce
194
+ the terms of your agreements with respect to distributed
195
+ SDK.
196
+
197
+
198
+ 1.1.3. Authorized Users
199
+
200
+ You may allow employees and contractors of your entity or of
201
+ your subsidiary(ies) to access and use the SDK from your
202
+ secure network to perform work on your behalf.
203
+
204
+ If you are an academic institution you may allow users
205
+ enrolled or employed by the academic institution to access and
206
+ use the SDK from your secure network.
207
+
208
+ You are responsible for the compliance with the terms of this
209
+ Agreement by your authorized users. If you become aware that
210
+ your authorized users didn’t follow the terms of this
211
+ Agreement, you agree to take reasonable steps to resolve the
212
+ non-compliance and prevent new occurrences.
213
+
214
+
215
+ 1.1.4. Pre-Release SDK
216
+
217
+ The SDK versions identified as alpha, beta, preview or
218
+ otherwise as pre-release, may not be fully functional, may
219
+ contain errors or design flaws, and may have reduced or
220
+ different security, privacy, accessibility, availability, and
221
+ reliability standards relative to commercial versions of
222
+ NVIDIA software and materials. Use of a pre-release SDK may
223
+ result in unexpected results, loss of data, project delays or
224
+ other unpredictable damage or loss.
225
+
226
+ You may use a pre-release SDK at your own risk, understanding
227
+ that pre-release SDKs are not intended for use in production
228
+ or business-critical systems.
229
+
230
+ NVIDIA may choose not to make available a commercial version
231
+ of any pre-release SDK. NVIDIA may also choose to abandon
232
+ development and terminate the availability of a pre-release
233
+ SDK at any time without liability.
234
+
235
+
236
+ 1.1.5. Updates
237
+
238
+ NVIDIA may, at its option, make available patches, workarounds
239
+ or other updates to this SDK. Unless the updates are provided
240
+ with their separate governing terms, they are deemed part of
241
+ the SDK licensed to you as provided in this Agreement. You
242
+ agree that the form and content of the SDK that NVIDIA
243
+ provides may change without prior notice to you. While NVIDIA
244
+ generally maintains compatibility between versions, NVIDIA may
245
+ in some cases make changes that introduce incompatibilities in
246
+ future versions of the SDK.
247
+
248
+
249
+ 1.1.6. Third Party Licenses
250
+
251
+ The SDK may come bundled with, or otherwise include or be
252
+ distributed with, third party software licensed by a NVIDIA
253
+ supplier and/or open source software provided under an open
254
+ source license. Use of third party software is subject to the
255
+ third-party license terms, or in the absence of third party
256
+ terms, the terms of this Agreement. Copyright to third party
257
+ software is held by the copyright holders indicated in the
258
+ third-party software or license.
259
+
260
+
261
+ 1.1.7. Reservation of Rights
262
+
263
+ NVIDIA reserves all rights, title, and interest in and to the
264
+ SDK, not expressly granted to you under this Agreement.
265
+
266
+
267
+ 1.2. Limitations
268
+
269
+ The following license limitations apply to your use of the
270
+ SDK:
271
+
272
+ 1. You may not reverse engineer, decompile or disassemble,
273
+ or remove copyright or other proprietary notices from any
274
+ portion of the SDK or copies of the SDK.
275
+
276
+ 2. Except as expressly provided in this Agreement, you may
277
+ not copy, sell, rent, sublicense, transfer, distribute,
278
+ modify, or create derivative works of any portion of the
279
+ SDK. For clarity, you may not distribute or sublicense the
280
+ SDK as a stand-alone product.
281
+
282
+ 3. Unless you have an agreement with NVIDIA for this
283
+ purpose, you may not indicate that an application created
284
+ with the SDK is sponsored or endorsed by NVIDIA.
285
+
286
+ 4. You may not bypass, disable, or circumvent any
287
+ encryption, security, digital rights management or
288
+ authentication mechanism in the SDK.
289
+
290
+ 5. You may not use the SDK in any manner that would cause it
291
+ to become subject to an open source software license. As
292
+ examples, licenses that require as a condition of use,
293
+ modification, and/or distribution that the SDK be:
294
+
295
+ a. Disclosed or distributed in source code form;
296
+
297
+ b. Licensed for the purpose of making derivative works;
298
+ or
299
+
300
+ c. Redistributable at no charge.
301
+
302
+ 6. Unless you have an agreement with NVIDIA for this
303
+ purpose, you may not use the SDK with any system or
304
+ application where the use or failure of the system or
305
+ application can reasonably be expected to threaten or
306
+ result in personal injury, death, or catastrophic loss.
307
+ Examples include use in avionics, navigation, military,
308
+ medical, life support or other life critical applications.
309
+ NVIDIA does not design, test or manufacture the SDK for
310
+ these critical uses and NVIDIA shall not be liable to you
311
+ or any third party, in whole or in part, for any claims or
312
+ damages arising from such uses.
313
+
314
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
315
+ and its affiliates, and their respective employees,
316
+ contractors, agents, officers and directors, from and
317
+ against any and all claims, damages, obligations, losses,
318
+ liabilities, costs or debt, fines, restitutions and
319
+ expenses (including but not limited to attorney’s fees
320
+ and costs incident to establishing the right of
321
+ indemnification) arising out of or related to your use of
322
+ the SDK outside of the scope of this Agreement, or not in
323
+ compliance with its terms.
324
+
325
+
326
+ 1.3. Ownership
327
+
328
+ 1. NVIDIA or its licensors hold all rights, title and
329
+ interest in and to the SDK and its modifications and
330
+ derivative works, including their respective intellectual
331
+ property rights, subject to your rights described in this
332
+ section. This SDK may include software and materials from
333
+ NVIDIA’s licensors, and these licensors are intended
334
+ third party beneficiaries that may enforce this Agreement
335
+ with respect to their intellectual property rights.
336
+
337
+ 2. You hold all rights, title and interest in and to your
338
+ applications and your derivative works of the sample
339
+ source code delivered in the SDK, including their
340
+ respective intellectual property rights, subject to
341
+ NVIDIA’s rights described in this section.
342
+
343
+ 3. You may, but don’t have to, provide to NVIDIA
344
+ suggestions, feature requests or other feedback regarding
345
+ the SDK, including possible enhancements or modifications
346
+ to the SDK. For any feedback that you voluntarily provide,
347
+ you hereby grant NVIDIA and its affiliates a perpetual,
348
+ non-exclusive, worldwide, irrevocable license to use,
349
+ reproduce, modify, license, sublicense (through multiple
350
+ tiers of sublicensees), and distribute (through multiple
351
+ tiers of distributors) it without the payment of any
352
+ royalties or fees to you. NVIDIA will use feedback at its
353
+ choice. NVIDIA is constantly looking for ways to improve
354
+ its products, so you may send feedback to NVIDIA through
355
+ the developer portal at https://developer.nvidia.com.
356
+
357
+
358
+ 1.4. No Warranties
359
+
360
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
361
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
362
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
363
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
364
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
365
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
366
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
367
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
368
+ DEALING OR COURSE OF TRADE.
369
+
370
+
371
+ 1.5. Limitation of Liability
372
+
373
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
374
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
375
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
376
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
377
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
378
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
379
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
380
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
381
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
382
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
383
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
384
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
385
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
386
+ LIMIT.
387
+
388
+ These exclusions and limitations of liability shall apply
389
+ regardless if NVIDIA or its affiliates have been advised of
390
+ the possibility of such damages, and regardless of whether a
391
+ remedy fails its essential purpose. These exclusions and
392
+ limitations of liability form an essential basis of the
393
+ bargain between the parties, and, absent any of these
394
+ exclusions or limitations of liability, the provisions of this
395
+ Agreement, including, without limitation, the economic terms,
396
+ would be substantially different.
397
+
398
+
399
+ 1.6. Termination
400
+
401
+ 1. This Agreement will continue to apply until terminated by
402
+ either you or NVIDIA as described below.
403
+
404
+ 2. If you want to terminate this Agreement, you may do so by
405
+ stopping to use the SDK.
406
+
407
+ 3. NVIDIA may, at any time, terminate this Agreement if:
408
+
409
+ a. (i) you fail to comply with any term of this
410
+ Agreement and the non-compliance is not fixed within
411
+ thirty (30) days following notice from NVIDIA (or
412
+ immediately if you violate NVIDIA’s intellectual
413
+ property rights);
414
+
415
+ b. (ii) you commence or participate in any legal
416
+ proceeding against NVIDIA with respect to the SDK; or
417
+
418
+ c. (iii) NVIDIA decides to no longer provide the SDK in
419
+ a country or, in NVIDIA’s sole discretion, the
420
+ continued use of it is no longer commercially viable.
421
+
422
+ 4. Upon any termination of this Agreement, you agree to
423
+ promptly discontinue use of the SDK and destroy all copies
424
+ in your possession or control. Your prior distributions in
425
+ accordance with this Agreement are not affected by the
426
+ termination of this Agreement. Upon written request, you
427
+ will certify in writing that you have complied with your
428
+ commitments under this section. Upon any termination of
429
+ this Agreement all provisions survive except for the
430
+ license grant provisions.
431
+
432
+
433
+ 1.7. General
434
+
435
+ If you wish to assign this Agreement or your rights and
436
+ obligations, including by merger, consolidation, dissolution
437
+ or operation of law, contact NVIDIA to ask for permission. Any
438
+ attempted assignment not approved by NVIDIA in writing shall
439
+ be void and of no effect. NVIDIA may assign, delegate or
440
+ transfer this Agreement and its rights and obligations, and if
441
+ to a non-affiliate you will be notified.
442
+
443
+ You agree to cooperate with NVIDIA and provide reasonably
444
+ requested information to verify your compliance with this
445
+ Agreement.
446
+
447
+ This Agreement will be governed in all respects by the laws of
448
+ the United States and of the State of Delaware as those laws
449
+ are applied to contracts entered into and performed entirely
450
+ within Delaware by Delaware residents, without regard to the
451
+ conflicts of laws principles. The United Nations Convention on
452
+ Contracts for the International Sale of Goods is specifically
453
+ disclaimed. You agree to all terms of this Agreement in the
454
+ English language.
455
+
456
+ The state or federal courts residing in Santa Clara County,
457
+ California shall have exclusive jurisdiction over any dispute
458
+ or claim arising out of this Agreement. Notwithstanding this,
459
+ you agree that NVIDIA shall still be allowed to apply for
460
+ injunctive remedies or an equivalent type of urgent legal
461
+ relief in any jurisdiction.
462
+
463
+ If any court of competent jurisdiction determines that any
464
+ provision of this Agreement is illegal, invalid or
465
+ unenforceable, such provision will be construed as limited to
466
+ the extent necessary to be consistent with and fully
467
+ enforceable under the law and the remaining provisions will
468
+ remain in full force and effect. Unless otherwise specified,
469
+ remedies are cumulative.
470
+
471
+ Each party acknowledges and agrees that the other is an
472
+ independent contractor in the performance of this Agreement.
473
+
474
+ The SDK has been developed entirely at private expense and is
475
+ “commercial items” consisting of “commercial computer
476
+ software” and “commercial computer software
477
+ documentation” provided with RESTRICTED RIGHTS. Use,
478
+ duplication or disclosure by the U.S. Government or a U.S.
479
+ Government subcontractor is subject to the restrictions in
480
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
481
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
482
+ Software - Restricted Rights clause at FAR 52.227-19, as
483
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
484
+ Expressway, Santa Clara, CA 95051.
485
+
486
+ The SDK is subject to United States export laws and
487
+ regulations. You agree that you will not ship, transfer or
488
+ export the SDK into any country, or use the SDK in any manner,
489
+ prohibited by the United States Bureau of Industry and
490
+ Security or economic sanctions regulations administered by the
491
+ U.S. Department of Treasury’s Office of Foreign Assets
492
+ Control (OFAC), or any applicable export laws, restrictions or
493
+ regulations. These laws include restrictions on destinations,
494
+ end users and end use. By accepting this Agreement, you
495
+ confirm that you are not a resident or citizen of any country
496
+ currently embargoed by the U.S. and that you are not otherwise
497
+ prohibited from receiving the SDK.
498
+
499
+ Any notice delivered by NVIDIA to you under this Agreement
500
+ will be delivered via mail, email or fax. You agree that any
501
+ notices that NVIDIA sends you electronically will satisfy any
502
+ legal communication requirements. Please direct your legal
503
+ notices or other correspondence to NVIDIA Corporation, 2788
504
+ San Tomas Expressway, Santa Clara, California 95051, United
505
+ States of America, Attention: Legal Department.
506
+
507
+ This Agreement and any exhibits incorporated into this
508
+ Agreement constitute the entire agreement of the parties with
509
+ respect to the subject matter of this Agreement and supersede
510
+ all prior negotiations or documentation exchanged between the
511
+ parties relating to this SDK license. Any additional and/or
512
+ conflicting terms on documents issued by you are null, void,
513
+ and invalid. Any amendment or waiver under this Agreement
514
+ shall be in writing and signed by representatives of both
515
+ parties.
516
+
517
+
518
+ 2. CUDA Toolkit Supplement to Software License Agreement for
519
+ NVIDIA Software Development Kits
520
+ ------------------------------------------------------------
521
+
522
+
523
+ Release date: August 16, 2018
524
+ -----------------------------
525
+
526
+ The terms in this supplement govern your use of the NVIDIA
527
+ CUDA Toolkit SDK under the terms of your license agreement
528
+ (“Agreement”) as modified by this supplement. Capitalized
529
+ terms used but not defined below have the meaning assigned to
530
+ them in the Agreement.
531
+
532
+ This supplement is an exhibit to the Agreement and is
533
+ incorporated as an integral part of the Agreement. In the
534
+ event of conflict between the terms in this supplement and the
535
+ terms in the Agreement, the terms in this supplement govern.
536
+
537
+
538
+ 2.1. License Scope
539
+
540
+ The SDK is licensed for you to develop applications only for
541
+ use in systems with NVIDIA GPUs.
542
+
543
+
544
+ 2.2. Distribution
545
+
546
+ The portions of the SDK that are distributable under the
547
+ Agreement are listed in Attachment A.
548
+
549
+
550
+ 2.3. Operating Systems
551
+
552
+ Those portions of the SDK designed exclusively for use on the
553
+ Linux or FreeBSD operating systems, or other operating systems
554
+ derived from the source code to these operating systems, may
555
+ be copied and redistributed for use in accordance with this
556
+ Agreement, provided that the object code files are not
557
+ modified in any way (except for unzipping of compressed
558
+ files).
559
+
560
+
561
+ 2.4. Audio and Video Encoders and Decoders
562
+
563
+ You acknowledge and agree that it is your sole responsibility
564
+ to obtain any additional third-party licenses required to
565
+ make, have made, use, have used, sell, import, and offer for
566
+ sale your products or services that include or incorporate any
567
+ third-party software and content relating to audio and/or
568
+ video encoders and decoders from, including but not limited
569
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
570
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
571
+ under this Agreement any necessary patent or other rights with
572
+ respect to any audio and/or video encoders and decoders.
573
+
574
+
575
+ 2.5. Licensing
576
+
577
+ If the distribution terms in this Agreement are not suitable
578
+ for your organization, or for any questions regarding this
579
+ Agreement, please contact NVIDIA at
580
581
+
582
+
583
+ 2.6. Attachment A
584
+
585
+ The following portions of the SDK are distributable under the
586
+ Agreement:
587
+
588
+ Component
589
+
590
+ CUDA Runtime
591
+
592
+ Windows
593
+
594
+ cudart.dll, cudart_static.lib, cudadevrt.lib
595
+
596
+ Mac OSX
597
+
598
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
599
+
600
+ Linux
601
+
602
+ libcudart.so, libcudart_static.a, libcudadevrt.a
603
+
604
+ Android
605
+
606
+ libcudart.so, libcudart_static.a, libcudadevrt.a
607
+
608
+ Component
609
+
610
+ CUDA FFT Library
611
+
612
+ Windows
613
+
614
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
615
+
616
+ Mac OSX
617
+
618
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
619
+ libcufftw_static.a
620
+
621
+ Linux
622
+
623
+ libcufft.so, libcufft_static.a, libcufftw.so,
624
+ libcufftw_static.a
625
+
626
+ Android
627
+
628
+ libcufft.so, libcufft_static.a, libcufftw.so,
629
+ libcufftw_static.a
630
+
631
+ Component
632
+
633
+ CUDA BLAS Library
634
+
635
+ Windows
636
+
637
+ cublas.dll, cublasLt.dll
638
+
639
+ Mac OSX
640
+
641
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
642
+ libcublasLt_static.a
643
+
644
+ Linux
645
+
646
+ libcublas.so, libcublasLt.so, libcublas_static.a,
647
+ libcublasLt_static.a
648
+
649
+ Android
650
+
651
+ libcublas.so, libcublasLt.so, libcublas_static.a,
652
+ libcublasLt_static.a
653
+
654
+ Component
655
+
656
+ NVIDIA "Drop-in" BLAS Library
657
+
658
+ Windows
659
+
660
+ nvblas.dll
661
+
662
+ Mac OSX
663
+
664
+ libnvblas.dylib
665
+
666
+ Linux
667
+
668
+ libnvblas.so
669
+
670
+ Component
671
+
672
+ CUDA Sparse Matrix Library
673
+
674
+ Windows
675
+
676
+ cusparse.dll, cusparse.lib
677
+
678
+ Mac OSX
679
+
680
+ libcusparse.dylib, libcusparse_static.a
681
+
682
+ Linux
683
+
684
+ libcusparse.so, libcusparse_static.a
685
+
686
+ Android
687
+
688
+ libcusparse.so, libcusparse_static.a
689
+
690
+ Component
691
+
692
+ CUDA Linear Solver Library
693
+
694
+ Windows
695
+
696
+ cusolver.dll, cusolver.lib
697
+
698
+ Mac OSX
699
+
700
+ libcusolver.dylib, libcusolver_static.a
701
+
702
+ Linux
703
+
704
+ libcusolver.so, libcusolver_static.a
705
+
706
+ Android
707
+
708
+ libcusolver.so, libcusolver_static.a
709
+
710
+ Component
711
+
712
+ CUDA Random Number Generation Library
713
+
714
+ Windows
715
+
716
+ curand.dll, curand.lib
717
+
718
+ Mac OSX
719
+
720
+ libcurand.dylib, libcurand_static.a
721
+
722
+ Linux
723
+
724
+ libcurand.so, libcurand_static.a
725
+
726
+ Android
727
+
728
+ libcurand.so, libcurand_static.a
729
+
730
+ Component
731
+
732
+ CUDA Accelerated Graph Library
733
+
734
+ Component
735
+
736
+ NVIDIA Performance Primitives Library
737
+
738
+ Windows
739
+
740
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
741
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
742
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
743
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
744
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
745
+
746
+ Mac OSX
747
+
748
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
749
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
750
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
751
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
752
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
753
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
754
+ libnpps.dylib, libnpps_static.a
755
+
756
+ Linux
757
+
758
+ libnppc.so, libnppc_static.a, libnppial.so,
759
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
760
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
761
+ libnppidei_static.a, libnppif.so, libnppif_static.a
762
+ libnppig.so, libnppig_static.a, libnppim.so,
763
+ libnppim_static.a, libnppist.so, libnppist_static.a,
764
+ libnppisu.so, libnppisu_static.a, libnppitc.so
765
+ libnppitc_static.a, libnpps.so, libnpps_static.a
766
+
767
+ Android
768
+
769
+ libnppc.so, libnppc_static.a, libnppial.so,
770
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
771
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
772
+ libnppidei_static.a, libnppif.so, libnppif_static.a
773
+ libnppig.so, libnppig_static.a, libnppim.so,
774
+ libnppim_static.a, libnppist.so, libnppist_static.a,
775
+ libnppisu.so, libnppisu_static.a, libnppitc.so
776
+ libnppitc_static.a, libnpps.so, libnpps_static.a
777
+
778
+ Component
779
+
780
+ NVIDIA JPEG Library
781
+
782
+ Linux
783
+
784
+ libnvjpeg.so, libnvjpeg_static.a
785
+
786
+ Component
787
+
788
+ Internal common library required for statically linking to
789
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
790
+
791
+ Mac OSX
792
+
793
+ libculibos.a
794
+
795
+ Linux
796
+
797
+ libculibos.a
798
+
799
+ Component
800
+
801
+ NVIDIA Runtime Compilation Library and Header
802
+
803
+ All
804
+
805
+ nvrtc.h
806
+
807
+ Windows
808
+
809
+ nvrtc.dll, nvrtc-builtins.dll
810
+
811
+ Mac OSX
812
+
813
+ libnvrtc.dylib, libnvrtc-builtins.dylib
814
+
815
+ Linux
816
+
817
+ libnvrtc.so, libnvrtc-builtins.so
818
+
819
+ Component
820
+
821
+ NVIDIA Optimizing Compiler Library
822
+
823
+ Windows
824
+
825
+ nvvm.dll
826
+
827
+ Mac OSX
828
+
829
+ libnvvm.dylib
830
+
831
+ Linux
832
+
833
+ libnvvm.so
834
+
835
+ Component
836
+
837
+ NVIDIA Common Device Math Functions Library
838
+
839
+ Windows
840
+
841
+ libdevice.10.bc
842
+
843
+ Mac OSX
844
+
845
+ libdevice.10.bc
846
+
847
+ Linux
848
+
849
+ libdevice.10.bc
850
+
851
+ Component
852
+
853
+ CUDA Occupancy Calculation Header Library
854
+
855
+ All
856
+
857
+ cuda_occupancy.h
858
+
859
+ Component
860
+
861
+ CUDA Half Precision Headers
862
+
863
+ All
864
+
865
+ cuda_fp16.h, cuda_fp16.hpp
866
+
867
+ Component
868
+
869
+ CUDA Profiling Tools Interface (CUPTI) Library
870
+
871
+ Windows
872
+
873
+ cupti.dll
874
+
875
+ Mac OSX
876
+
877
+ libcupti.dylib
878
+
879
+ Linux
880
+
881
+ libcupti.so
882
+
883
+ Component
884
+
885
+ NVIDIA Tools Extension Library
886
+
887
+ Windows
888
+
889
+ nvToolsExt.dll, nvToolsExt.lib
890
+
891
+ Mac OSX
892
+
893
+ libnvToolsExt.dylib
894
+
895
+ Linux
896
+
897
+ libnvToolsExt.so
898
+
899
+ Component
900
+
901
+ NVIDIA CUDA Driver Libraries
902
+
903
+ Linux
904
+
905
+ libcuda.so, libnvidia-fatbinaryloader.so,
906
+ libnvidia-ptxjitcompiler.so
907
+
908
+ The NVIDIA CUDA Driver Libraries are only distributable in
909
+ applications that meet this criteria:
910
+
911
+ 1. The application was developed starting from a NVIDIA CUDA
912
+ container obtained from Docker Hub or the NVIDIA GPU
913
+ Cloud, and
914
+
915
+ 2. The resulting application is packaged as a Docker
916
+ container and distributed to users on Docker Hub or the
917
+ NVIDIA GPU Cloud only.
918
+
919
+
920
+ 2.7. Attachment B
921
+
922
+
923
+ Additional Licensing Obligations
924
+
925
+ The following third party components included in the SOFTWARE
926
+ are licensed to Licensee pursuant to the following terms and
927
+ conditions:
928
+
929
+ 1. Licensee's use of the GDB third party component is
930
+ subject to the terms and conditions of GNU GPL v3:
931
+
932
+ This product includes copyrighted third-party software licensed
933
+ under the terms of the GNU General Public License v3 ("GPL v3").
934
+ All third-party software packages are copyright by their respective
935
+ authors. GPL v3 terms and conditions are hereby incorporated into
936
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
937
+
938
+ Consistent with these licensing requirements, the software
939
+ listed below is provided under the terms of the specified
940
+ open source software licenses. To obtain source code for
941
+ software provided under licenses that require
942
+ redistribution of source code, including the GNU General
943
+ Public License (GPL) and GNU Lesser General Public License
944
+ (LGPL), contact [email protected]. This offer is
945
+ valid for a period of three (3) years from the date of the
946
+ distribution of this product by NVIDIA CORPORATION.
947
+
948
+ Component License
949
+ CUDA-GDB GPL v3
950
+
951
+ 2. Licensee represents and warrants that any and all third
952
+ party licensing and/or royalty payment obligations in
953
+ connection with Licensee's use of the H.264 video codecs
954
+ are solely the responsibility of Licensee.
955
+
956
+ 3. Licensee's use of the Thrust library is subject to the
957
+ terms and conditions of the Apache License Version 2.0.
958
+ All third-party software packages are copyright by their
959
+ respective authors. Apache License Version 2.0 terms and
960
+ conditions are hereby incorporated into the Agreement by
961
+ this reference.
962
+ http://www.apache.org/licenses/LICENSE-2.0.html
963
+
964
+ In addition, Licensee acknowledges the following notice:
965
+ Thrust includes source code from the Boost Iterator,
966
+ Tuple, System, and Random Number libraries.
967
+
968
+ Boost Software License - Version 1.0 - August 17th, 2003
969
+ . . . .
970
+
971
+ Permission is hereby granted, free of charge, to any person or
972
+ organization obtaining a copy of the software and accompanying
973
+ documentation covered by this license (the "Software") to use,
974
+ reproduce, display, distribute, execute, and transmit the Software,
975
+ and to prepare derivative works of the Software, and to permit
976
+ third-parties to whom the Software is furnished to do so, all
977
+ subject to the following:
978
+
979
+ The copyright notices in the Software and this entire statement,
980
+ including the above license grant, this restriction and the following
981
+ disclaimer, must be included in all copies of the Software, in whole
982
+ or in part, and all derivative works of the Software, unless such
983
+ copies or derivative works are solely in the form of machine-executable
984
+ object code generated by a source language processor.
985
+
986
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
987
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
988
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
989
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
990
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
991
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
992
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
993
+ OTHER DEALINGS IN THE SOFTWARE.
994
+
995
+ 4. Licensee's use of the LLVM third party component is
996
+ subject to the following terms and conditions:
997
+
998
+ ======================================================
999
+ LLVM Release License
1000
+ ======================================================
1001
+ University of Illinois/NCSA
1002
+ Open Source License
1003
+
1004
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
1005
+ All rights reserved.
1006
+
1007
+ Developed by:
1008
+
1009
+ LLVM Team
1010
+
1011
+ University of Illinois at Urbana-Champaign
1012
+
1013
+ http://llvm.org
1014
+
1015
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1016
+ of this software and associated documentation files (the "Software"), to
1017
+ deal with the Software without restriction, including without limitation the
1018
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1019
+ sell copies of the Software, and to permit persons to whom the Software is
1020
+ furnished to do so, subject to the following conditions:
1021
+
1022
+ * Redistributions of source code must retain the above copyright notice,
1023
+ this list of conditions and the following disclaimers.
1024
+
1025
+ * Redistributions in binary form must reproduce the above copyright
1026
+ notice, this list of conditions and the following disclaimers in the
1027
+ documentation and/or other materials provided with the distribution.
1028
+
1029
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
1030
+ Champaign, nor the names of its contributors may be used to endorse or
1031
+ promote products derived from this Software without specific prior
1032
+ written permission.
1033
+
1034
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1035
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1036
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1037
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1038
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1039
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1040
+ DEALINGS WITH THE SOFTWARE.
1041
+
1042
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
1043
+ component is subject to the following terms and
1044
+ conditions:
1045
+
1046
+ ------------
1047
+ PCRE LICENCE
1048
+ ------------
1049
+ PCRE is a library of functions to support regular expressions whose syntax
1050
+ and semantics are as close as possible to those of the Perl 5 language.
1051
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
1052
+ specified below. The documentation for PCRE, supplied in the "doc"
1053
+ directory, is distributed under the same terms as the software itself. The
1054
+ basic library functions are written in C and are freestanding. Also
1055
+ included in the distribution is a set of C++ wrapper functions, and a just-
1056
+ in-time compiler that can be used to optimize pattern matching. These are
1057
+ both optional features that can be omitted when the library is built.
1058
+
1059
+ THE BASIC LIBRARY FUNCTIONS
1060
+ ---------------------------
1061
+ Written by: Philip Hazel
1062
+ Email local part: ph10
1063
+ Email domain: cam.ac.uk
1064
+ University of Cambridge Computing Service,
1065
+ Cambridge, England.
1066
+ Copyright (c) 1997-2012 University of Cambridge
1067
+ All rights reserved.
1068
+
1069
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
1070
+ -------------------------------------
1071
+ Written by: Zoltan Herczeg
1072
+ Email local part: hzmester
1073
+ Emain domain: freemail.hu
1074
+ Copyright(c) 2010-2012 Zoltan Herczeg
1075
+ All rights reserved.
1076
+
1077
+ STACK-LESS JUST-IN-TIME COMPILER
1078
+ --------------------------------
1079
+ Written by: Zoltan Herczeg
1080
+ Email local part: hzmester
1081
+ Emain domain: freemail.hu
1082
+ Copyright(c) 2009-2012 Zoltan Herczeg
1083
+ All rights reserved.
1084
+
1085
+ THE C++ WRAPPER FUNCTIONS
1086
+ -------------------------
1087
+ Contributed by: Google Inc.
1088
+ Copyright (c) 2007-2012, Google Inc.
1089
+ All rights reserved.
1090
+
1091
+ THE "BSD" LICENCE
1092
+ -----------------
1093
+ Redistribution and use in source and binary forms, with or without
1094
+ modification, are permitted provided that the following conditions are met:
1095
+
1096
+ * Redistributions of source code must retain the above copyright notice,
1097
+ this list of conditions and the following disclaimer.
1098
+
1099
+ * Redistributions in binary form must reproduce the above copyright
1100
+ notice, this list of conditions and the following disclaimer in the
1101
+ documentation and/or other materials provided with the distribution.
1102
+
1103
+ * Neither the name of the University of Cambridge nor the name of Google
1104
+ Inc. nor the names of their contributors may be used to endorse or
1105
+ promote products derived from this software without specific prior
1106
+ written permission.
1107
+
1108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1109
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1110
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1111
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
1112
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1113
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1114
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1115
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1116
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1117
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1118
+ POSSIBILITY OF SUCH DAMAGE.
1119
+
1120
+ 6. Some of the cuBLAS library routines were written by or
1121
+ derived from code written by Vasily Volkov and are subject
1122
+ to the Modified Berkeley Software Distribution License as
1123
+ follows:
1124
+
1125
+ Copyright (c) 2007-2009, Regents of the University of California
1126
+
1127
+ All rights reserved.
1128
+
1129
+ Redistribution and use in source and binary forms, with or without
1130
+ modification, are permitted provided that the following conditions are
1131
+ met:
1132
+ * Redistributions of source code must retain the above copyright
1133
+ notice, this list of conditions and the following disclaimer.
1134
+ * Redistributions in binary form must reproduce the above
1135
+ copyright notice, this list of conditions and the following
1136
+ disclaimer in the documentation and/or other materials provided
1137
+ with the distribution.
1138
+ * Neither the name of the University of California, Berkeley nor
1139
+ the names of its contributors may be used to endorse or promote
1140
+ products derived from this software without specific prior
1141
+ written permission.
1142
+
1143
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1144
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1145
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1146
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1147
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1148
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1149
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1150
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1151
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1152
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1153
+ POSSIBILITY OF SUCH DAMAGE.
1154
+
1155
+ 7. Some of the cuBLAS library routines were written by or
1156
+ derived from code written by Davide Barbieri and are
1157
+ subject to the Modified Berkeley Software Distribution
1158
+ License as follows:
1159
+
1160
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
1161
+
1162
+ All rights reserved.
1163
+
1164
+ Redistribution and use in source and binary forms, with or without
1165
+ modification, are permitted provided that the following conditions are
1166
+ met:
1167
+ * Redistributions of source code must retain the above copyright
1168
+ notice, this list of conditions and the following disclaimer.
1169
+ * Redistributions in binary form must reproduce the above
1170
+ copyright notice, this list of conditions and the following
1171
+ disclaimer in the documentation and/or other materials provided
1172
+ with the distribution.
1173
+ * The name of the author may not be used to endorse or promote
1174
+ products derived from this software without specific prior
1175
+ written permission.
1176
+
1177
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1178
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1179
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1180
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1181
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1182
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1183
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1184
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1185
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1186
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1187
+ POSSIBILITY OF SUCH DAMAGE.
1188
+
1189
+ 8. Some of the cuBLAS library routines were derived from
1190
+ code developed by the University of Tennessee and are
1191
+ subject to the Modified Berkeley Software Distribution
1192
+ License as follows:
1193
+
1194
+ Copyright (c) 2010 The University of Tennessee.
1195
+
1196
+ All rights reserved.
1197
+
1198
+ Redistribution and use in source and binary forms, with or without
1199
+ modification, are permitted provided that the following conditions are
1200
+ met:
1201
+ * Redistributions of source code must retain the above copyright
1202
+ notice, this list of conditions and the following disclaimer.
1203
+ * Redistributions in binary form must reproduce the above
1204
+ copyright notice, this list of conditions and the following
1205
+ disclaimer listed in this license in the documentation and/or
1206
+ other materials provided with the distribution.
1207
+ * Neither the name of the copyright holders nor the names of its
1208
+ contributors may be used to endorse or promote products derived
1209
+ from this software without specific prior written permission.
1210
+
1211
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1212
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1213
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1214
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1215
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1216
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1217
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1218
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1219
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1220
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1221
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1222
+
1223
+ 9. Some of the cuBLAS library routines were written by or
1224
+ derived from code written by Jonathan Hogg and are subject
1225
+ to the Modified Berkeley Software Distribution License as
1226
+ follows:
1227
+
1228
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
1229
+
1230
+ All rights reserved.
1231
+
1232
+ Redistribution and use in source and binary forms, with or without
1233
+ modification, are permitted provided that the following conditions are
1234
+ met:
1235
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah ([email protected])
1265
+ David Keyes ([email protected])
1266
+ Hatem Ltaief ([email protected])
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition are subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines use code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from LunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license.
1567
+
1568
+ -----------------
venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/METADATA ADDED
@@ -0,0 +1,35 @@
1
+ Metadata-Version: 2.1
2
+ Name: nvidia-cublas-cu12
3
+ Version: 12.1.3.1
4
+ Summary: CUBLAS native runtime libraries
5
+ Home-page: https://developer.nvidia.com/cuda-zone
6
+ Author: Nvidia CUDA Installer Team
7
+ Author-email: [email protected]
8
+ License: NVIDIA Proprietary Software
9
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: Other/Proprietary License
15
+ Classifier: Natural Language :: English
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.5
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3 :: Only
25
+ Classifier: Topic :: Scientific/Engineering
26
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
27
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Software Development
29
+ Classifier: Topic :: Software Development :: Libraries
30
+ Classifier: Operating System :: Microsoft :: Windows
31
+ Classifier: Operating System :: POSIX :: Linux
32
+ Requires-Python: >=3
33
+ License-File: License.txt
34
+
35
+ CUBLAS native runtime libraries
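The METADATA fields above are queryable at runtime once the wheel is installed; a minimal sketch using the standard-library `importlib.metadata` (the printed values are simply the fields shown above):

```python
from importlib.metadata import metadata, version

print(version("nvidia-cublas-cu12"))  # "12.1.3.1"
md = metadata("nvidia-cublas-cu12")   # an email.Message-like mapping
print(md["License"])                  # "NVIDIA Proprietary Software"
print(md["Requires-Python"])          # ">=3"
```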
venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,23 @@
1
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
3
+ nvidia/cublas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ nvidia/cublas/__pycache__/__init__.cpython-310.pyc,,
5
+ nvidia/cublas/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc,,
7
+ nvidia/cublas/include/cublas.h,sha256=a0lLqy-k47NuwyDjuueC3W0Mpc908MTU7o5sMJqE-1w,41246
8
+ nvidia/cublas/include/cublasLt.h,sha256=Qadag9UccOwt6czAl1q89MMJZkddB2U9z0KUXoitoLc,76626
9
+ nvidia/cublas/include/cublasXt.h,sha256=CW9dyXYGSUW1wEXrVVyhU6OxBK1PUvMoYdVGlQT7L9A,37380
10
+ nvidia/cublas/include/cublas_api.h,sha256=hV93oe_IH7Y7nvEwDNw37ASJUKDkdgsTAQr0szvJinA,364749
11
+ nvidia/cublas/include/cublas_v2.h,sha256=qxMdB5jb97luEfw61LEAB-Wlr8A9DLBvO4rRypDCNKw,15460
12
+ nvidia/cublas/include/nvblas.h,sha256=dXCLR-2oUiJFzLsDtIAK09m42ct4G0HWdYzBUuDPXpc,23341
13
+ nvidia/cublas/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
+ nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc,,
15
+ nvidia/cublas/lib/libcublas.so.12,sha256=N9EUERHWuTWqhBVq1h_TV1NQytjWnQkW6dt9N-75uBY,107473968
16
+ nvidia/cublas/lib/libcublasLt.so.12,sha256=-Xv8LPddxA2mUOuXr_Y9PhlfUAzGI8dPP-M84s4rcfQ,515090264
17
+ nvidia/cublas/lib/libnvblas.so.12,sha256=-F6UXvUxzDZgOYkEFZTzu3GhriKr17JYveDEgTrrxWE,737048
18
+ nvidia_cublas_cu12-12.1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
19
+ nvidia_cublas_cu12-12.1.3.1.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
20
+ nvidia_cublas_cu12-12.1.3.1.dist-info/METADATA,sha256=88crLgU_Dos_nCVea8NfW27kik3GFl4N92xUelGbPkw,1505
21
+ nvidia_cublas_cu12-12.1.3.1.dist-info/RECORD,,
22
+ nvidia_cublas_cu12-12.1.3.1.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
23
+ nvidia_cublas_cu12-12.1.3.1.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
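Each RECORD row above has the form `path,sha256=<urlsafe-base64 digest>,size`; entries such as the `.pyc` files and RECORD itself carry no hash. A minimal verification sketch, not part of the wheel (`verify_record` is a hypothetical helper name):

```python
import base64
import csv
import hashlib
from pathlib import Path

def verify_record(dist_info: Path) -> None:
    """Check every hashed RECORD entry against the installed files."""
    site_packages = dist_info.parent
    with open(dist_info / "RECORD", newline="") as f:
        for path, digest, size in csv.reader(f):
            if not digest:  # RECORD itself and *.pyc entries carry no hash
                continue
            data = (site_packages / path).read_bytes()
            expected = digest.split("=", 1)[1]
            actual = (
                base64.urlsafe_b64encode(hashlib.sha256(data).digest())
                .rstrip(b"=")
                .decode()
            )
            assert actual == expected, f"hash mismatch for {path}"
            assert int(size) == len(data), f"size mismatch for {path}"

# verify_record(Path("venv/lib/python3.10/site-packages/"
#                    "nvidia_cublas_cu12-12.1.3.1.dist-info"))
```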
venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-manylinux1_x86_64
5
+
venv/lib/python3.10/site-packages/nvidia_cublas_cu12-12.1.3.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ nvidia
venv/lib/python3.10/site-packages/peft/__init__.py ADDED
@@ -0,0 +1,90 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module while preserving other warnings, so don't check this module at all.
4
+
5
+ # coding=utf-8
6
+ # Copyright 2023-present the HuggingFace Inc. team.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ __version__ = "0.10.0"
21
+
22
+ from .auto import (
23
+ AutoPeftModel,
24
+ AutoPeftModelForCausalLM,
25
+ AutoPeftModelForSequenceClassification,
26
+ AutoPeftModelForSeq2SeqLM,
27
+ AutoPeftModelForTokenClassification,
28
+ AutoPeftModelForQuestionAnswering,
29
+ AutoPeftModelForFeatureExtraction,
30
+ )
31
+ from .mapping import (
32
+ MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
33
+ PEFT_TYPE_TO_CONFIG_MAPPING,
34
+ get_peft_config,
35
+ get_peft_model,
36
+ inject_adapter_in_model,
37
+ )
38
+ from .mixed_model import PeftMixedModel
39
+ from .peft_model import (
40
+ PeftModel,
41
+ PeftModelForCausalLM,
42
+ PeftModelForSeq2SeqLM,
43
+ PeftModelForSequenceClassification,
44
+ PeftModelForTokenClassification,
45
+ PeftModelForQuestionAnswering,
46
+ PeftModelForFeatureExtraction,
47
+ )
48
+ from .tuners import (
49
+ AdaptionPromptConfig,
50
+ AdaptionPromptModel,
51
+ LoraConfig,
52
+ LoftQConfig,
53
+ LoraModel,
54
+ LoHaConfig,
55
+ LoHaModel,
56
+ LoKrConfig,
57
+ LoKrModel,
58
+ IA3Config,
59
+ IA3Model,
60
+ AdaLoraConfig,
61
+ AdaLoraModel,
62
+ PrefixEncoder,
63
+ PrefixTuningConfig,
64
+ PromptEmbedding,
65
+ PromptEncoder,
66
+ PromptEncoderConfig,
67
+ PromptEncoderReparameterizationType,
68
+ PromptTuningConfig,
69
+ PromptTuningInit,
70
+ MultitaskPromptTuningConfig,
71
+ MultitaskPromptTuningInit,
72
+ OFTConfig,
73
+ OFTModel,
74
+ PolyConfig,
75
+ PolyModel,
76
+ )
77
+ from .utils import (
78
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
79
+ PeftType,
80
+ TaskType,
81
+ bloom_model_postprocess_past_key_value,
82
+ get_peft_model_state_dict,
83
+ prepare_model_for_kbit_training,
84
+ replace_lora_weights_loftq,
85
+ set_peft_model_state_dict,
86
+ shift_tokens_right,
87
+ load_peft_weights,
88
+ cast_mixed_precision_params,
89
+ )
90
+ from .config import PeftConfig, PromptLearningConfig
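Since this `__init__.py` re-exports the whole public API, every name above is importable from the package root. A short sketch, assuming this peft 0.10.0 snapshot is installed:

```python
import peft
from peft import LoraConfig, TaskType, get_peft_model

print(peft.__version__)  # "0.10.0" for this snapshot
config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=32)
# model = get_peft_model(base_model, config)  # base_model: any causal-LM PreTrainedModel
```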
venv/lib/python3.10/site-packages/peft/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.2 kB).
 
venv/lib/python3.10/site-packages/peft/__pycache__/auto.cpython-310.pyc ADDED
Binary file (4.85 kB).
 
venv/lib/python3.10/site-packages/peft/__pycache__/config.cpython-310.pyc ADDED
Binary file (8.79 kB).
 
venv/lib/python3.10/site-packages/peft/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (4.09 kB).
 
venv/lib/python3.10/site-packages/peft/__pycache__/import_utils.cpython-310.pyc ADDED
Binary file (1.92 kB).
 
venv/lib/python3.10/site-packages/peft/__pycache__/mapping.cpython-310.pyc ADDED
Binary file (4.69 kB).
 
venv/lib/python3.10/site-packages/peft/__pycache__/mixed_model.cpython-310.pyc ADDED
Binary file (14.5 kB).
 
venv/lib/python3.10/site-packages/peft/__pycache__/peft_model.cpython-310.pyc ADDED
Binary file (53.5 kB).
 
venv/lib/python3.10/site-packages/peft/auto.py ADDED
@@ -0,0 +1,170 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import importlib
18
+ import os
19
+ from typing import Optional
20
+
21
+ from transformers import (
22
+ AutoModel,
23
+ AutoModelForCausalLM,
24
+ AutoModelForQuestionAnswering,
25
+ AutoModelForSeq2SeqLM,
26
+ AutoModelForSequenceClassification,
27
+ AutoModelForTokenClassification,
28
+ AutoTokenizer,
29
+ )
30
+
31
+ from .config import PeftConfig
32
+ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
33
+ from .peft_model import (
34
+ PeftModel,
35
+ PeftModelForCausalLM,
36
+ PeftModelForFeatureExtraction,
37
+ PeftModelForQuestionAnswering,
38
+ PeftModelForSeq2SeqLM,
39
+ PeftModelForSequenceClassification,
40
+ PeftModelForTokenClassification,
41
+ )
42
+ from .utils.constants import TOKENIZER_CONFIG_NAME
43
+ from .utils.other import check_file_exists_on_hf_hub
44
+
45
+
46
+ class _BaseAutoPeftModel:
47
+ _target_class = None
48
+ _target_peft_class = None
49
+
50
+ def __init__(self, *args, **kwargs):
51
+ # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400
52
+ raise EnvironmentError( # noqa: UP024
53
+ f"{self.__class__.__name__} is designed to be instantiated "
54
+ f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
55
+ f"`{self.__class__.__name__}.from_config(config)` methods."
56
+ )
57
+
58
+ @classmethod
59
+ def from_pretrained(
60
+ cls,
61
+ pretrained_model_name_or_path,
62
+ adapter_name: str = "default",
63
+ is_trainable: bool = False,
64
+ config: Optional[PeftConfig] = None,
65
+ **kwargs,
66
+ ):
67
+ r"""
68
+ A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs
69
+ are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and
70
+ the config object init.
71
+ """
72
+ peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
73
+ base_model_path = peft_config.base_model_name_or_path
74
+
75
+ task_type = getattr(peft_config, "task_type", None)
76
+
77
+ if cls._target_class is not None:
78
+ target_class = cls._target_class
79
+ elif cls._target_class is None and task_type is not None:
80
+ # this is only in the case where we use `AutoPeftModel`
81
+ raise ValueError(
82
+ "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)"
83
+ )
84
+
85
+ if task_type is not None:
86
+ expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type]
87
+ if cls._target_peft_class.__name__ != expected_target_class.__name__:
88
+ raise ValueError(
89
+ f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }"
90
+ " make sure that you are loading the correct model for your task type."
91
+ )
92
+ elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None:
93
+ auto_mapping = getattr(peft_config, "auto_mapping", None)
94
+ base_model_class = auto_mapping["base_model_class"]
95
+ parent_library_name = auto_mapping["parent_library"]
96
+
97
+ parent_library = importlib.import_module(parent_library_name)
98
+ target_class = getattr(parent_library, base_model_class)
99
+ else:
100
+ raise ValueError(
101
+ "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type."
102
+ )
103
+
104
+ base_model = target_class.from_pretrained(base_model_path, **kwargs)
105
+
106
+ tokenizer_exists = False
107
+ if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)):
108
+ tokenizer_exists = True
109
+ else:
110
+ token = kwargs.get("token", None)
111
+ if token is None:
112
+ token = kwargs.get("use_auth_token", None)
113
+
114
+ tokenizer_exists = check_file_exists_on_hf_hub(
115
+ repo_id=pretrained_model_name_or_path,
116
+ filename=TOKENIZER_CONFIG_NAME,
117
+ revision=kwargs.get("revision", None),
118
+ repo_type=kwargs.get("repo_type", None),
119
+ token=token,
120
+ )
121
+
122
+ if tokenizer_exists:
123
+ tokenizer = AutoTokenizer.from_pretrained(
124
+ pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False)
125
+ )
126
+ base_model.resize_token_embeddings(len(tokenizer))
127
+
128
+ return cls._target_peft_class.from_pretrained(
129
+ base_model,
130
+ pretrained_model_name_or_path,
131
+ adapter_name=adapter_name,
132
+ is_trainable=is_trainable,
133
+ config=config,
134
+ **kwargs,
135
+ )
136
+
137
+
138
+ class AutoPeftModel(_BaseAutoPeftModel):
139
+ _target_class = None
140
+ _target_peft_class = PeftModel
141
+
142
+
143
+ class AutoPeftModelForCausalLM(_BaseAutoPeftModel):
144
+ _target_class = AutoModelForCausalLM
145
+ _target_peft_class = PeftModelForCausalLM
146
+
147
+
148
+ class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel):
149
+ _target_class = AutoModelForSeq2SeqLM
150
+ _target_peft_class = PeftModelForSeq2SeqLM
151
+
152
+
153
+ class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel):
154
+ _target_class = AutoModelForSequenceClassification
155
+ _target_peft_class = PeftModelForSequenceClassification
156
+
157
+
158
+ class AutoPeftModelForTokenClassification(_BaseAutoPeftModel):
159
+ _target_class = AutoModelForTokenClassification
160
+ _target_peft_class = PeftModelForTokenClassification
161
+
162
+
163
+ class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel):
164
+ _target_class = AutoModelForQuestionAnswering
165
+ _target_peft_class = PeftModelForQuestionAnswering
166
+
167
+
168
+ class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel):
169
+ _target_class = AutoModel
170
+ _target_peft_class = PeftModelForFeatureExtraction
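A hedged usage sketch of the auto classes defined above; the repo id is a placeholder for any Hub adapter repo whose `adapter_config.json` declares `task_type="CAUSAL_LM"`:

```python
from peft import AutoPeftModelForCausalLM

# "some-user/opt-350m-lora" is a placeholder adapter repo, not a real id.
model = AutoPeftModelForCausalLM.from_pretrained(
    "some-user/opt-350m-lora",
    is_trainable=False,  # inference only; adapter weights stay frozen
)
```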
venv/lib/python3.10/site-packages/peft/config.py ADDED
@@ -0,0 +1,270 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import json
16
+ import os
17
+ from dataclasses import asdict, dataclass, field
18
+ from typing import Dict, Optional, Union
19
+
20
+ from huggingface_hub import hf_hub_download
21
+ from transformers.utils import PushToHubMixin
22
+
23
+ from .utils import CONFIG_NAME, PeftType, TaskType
24
+
25
+
26
+ @dataclass
27
+ class PeftConfigMixin(PushToHubMixin):
28
+ r"""
29
+ This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all
30
+ PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to
31
+ push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a
32
+ directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.
33
+
34
+ Args:
35
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
36
+ """
37
+
38
+ peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."})
39
+ auto_mapping: Optional[dict] = field(
40
+ default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."}
41
+ )
42
+
43
+ def to_dict(self) -> Dict:
44
+ r"""
45
+ Returns the configuration for your adapter model as a dictionary.
46
+ """
47
+ return asdict(self)
48
+
49
+ def save_pretrained(self, save_directory: str, **kwargs) -> None:
50
+ r"""
51
+ This method saves the configuration of your adapter model in a directory.
52
+
53
+ Args:
54
+ save_directory (`str`):
55
+ The directory where the configuration will be saved.
56
+ kwargs (additional keyword arguments, *optional*):
57
+ Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]
58
+ method.
59
+ """
60
+ if os.path.isfile(save_directory):
61
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
62
+
63
+ os.makedirs(save_directory, exist_ok=True)
64
+ auto_mapping_dict = kwargs.pop("auto_mapping_dict", None)
65
+
66
+ output_dict = asdict(self)
67
+ # converting set type to list
68
+ for key, value in output_dict.items():
69
+ if isinstance(value, set):
70
+ output_dict[key] = list(value)
71
+
72
+ output_path = os.path.join(save_directory, CONFIG_NAME)
73
+
74
+ # Add auto mapping details for custom models.
75
+ if auto_mapping_dict is not None:
76
+ output_dict["auto_mapping"] = auto_mapping_dict
77
+
78
+ # save it
79
+ with open(output_path, "w") as writer:
80
+ writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
81
+
82
+ @classmethod
83
+ def from_peft_type(cls, **kwargs):
84
+ r"""
85
+ This method loads the configuration of your adapter model from a set of kwargs.
86
+
87
+ The appropriate configuration type is determined by the `peft_type` argument. If `peft_type` is not provided,
88
+ the calling class type is instantiated.
89
+
90
+ Args:
91
+ kwargs (configuration keyword arguments):
92
+ Keyword arguments passed along to the configuration initialization.
93
+ """
94
+ # Avoid circular dependency .. TODO: fix this with a larger refactor
95
+ from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING
96
+
97
+ # TODO: this hack is needed to fix the following issue (on commit 702f937):
98
+ # if someone saves a default config and loads it back with `PeftConfig` class it leads to
99
+ # not loading the correct config class.
100
+
101
+ # from peft import AdaLoraConfig, PeftConfig
102
+ # peft_config = AdaLoraConfig()
103
+ # print(peft_config)
104
+ # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None,
105
+ # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ...
106
+ #
107
+ # peft_config.save_pretrained("./test_config")
108
+ # peft_config = PeftConfig.from_pretrained("./test_config")
109
+ # print(peft_config)
110
+ # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False)
111
+
112
+ if "peft_type" in kwargs:
113
+ peft_type = kwargs["peft_type"]
114
+ config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]
115
+ else:
116
+ config_cls = cls
117
+
118
+ return config_cls(**kwargs)
119
+
120
+ @classmethod
121
+ def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs):
122
+ r"""
123
+ This method loads the configuration of your adapter model from a directory.
124
+
125
+ Args:
126
+ pretrained_model_name_or_path (`str`):
127
+ The directory or the Hub repository id where the configuration is saved.
128
+ kwargs (additional keyword arguments, *optional*):
129
+ Additional keyword arguments passed along to the child class initialization.
130
+ """
131
+ path = (
132
+ os.path.join(pretrained_model_name_or_path, subfolder)
133
+ if subfolder is not None
134
+ else pretrained_model_name_or_path
135
+ )
136
+
137
+ hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs)
138
+
139
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
140
+ config_file = os.path.join(path, CONFIG_NAME)
141
+ else:
142
+ try:
143
+ config_file = hf_hub_download(
144
+ pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs
145
+ )
146
+ except Exception:
147
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'")
148
+
149
+ loaded_attributes = cls.from_json_file(config_file)
150
+ kwargs = {**class_kwargs, **loaded_attributes}
151
+ return cls.from_peft_type(**kwargs)
152
+
153
+ @classmethod
154
+ def from_json_file(cls, path_json_file: str, **kwargs):
155
+ r"""
156
+ Loads a configuration file from a json file.
157
+
158
+ Args:
159
+ path_json_file (`str`):
160
+ The path to the json file.
161
+ """
162
+ with open(path_json_file) as file:
163
+ json_object = json.load(file)
164
+
165
+ return json_object
166
+
167
+ @classmethod
168
+ def _split_kwargs(cls, kwargs):
169
+ hf_hub_download_kwargs = {}
170
+ class_kwargs = {}
171
+ other_kwargs = {}
172
+
173
+ for key, value in kwargs.items():
174
+ if key in inspect.signature(hf_hub_download).parameters:
175
+ hf_hub_download_kwargs[key] = value
176
+ elif key in list(cls.__annotations__):
177
+ class_kwargs[key] = value
178
+ else:
179
+ other_kwargs[key] = value
180
+
181
+ return hf_hub_download_kwargs, class_kwargs, other_kwargs
182
+
183
+ @classmethod
184
+ def _get_peft_type(
185
+ cls,
186
+ model_id: str,
187
+ **hf_hub_download_kwargs,
188
+ ):
189
+ subfolder = hf_hub_download_kwargs.get("subfolder", None)
190
+
191
+ path = os.path.join(model_id, subfolder) if subfolder is not None else model_id
192
+
193
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
194
+ config_file = os.path.join(path, CONFIG_NAME)
195
+ else:
196
+ try:
197
+ config_file = hf_hub_download(
198
+ model_id,
199
+ CONFIG_NAME,
200
+ **hf_hub_download_kwargs,
201
+ )
202
+ except Exception:
203
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'")
204
+
205
+ loaded_attributes = cls.from_json_file(config_file)
206
+ return loaded_attributes["peft_type"]
207
+
208
+ @property
209
+ def is_prompt_learning(self) -> bool:
210
+ r"""
211
+ Utility method to check if the configuration is for prompt learning.
212
+ """
213
+ return False
214
+
215
+ @property
216
+ def is_adaption_prompt(self) -> bool:
217
+ """Return True if this is an adaption prompt config."""
218
+ return False
219
+
220
+
221
+ @dataclass
222
+ class PeftConfig(PeftConfigMixin):
223
+ """
224
+ This is the base configuration class to store the configuration of a [`PeftModel`].
225
+
226
+ Args:
227
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
228
+ task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.
229
+ inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.
230
+ """
231
+
232
+ base_model_name_or_path: Optional[str] = field(
233
+ default=None, metadata={"help": "The name of the base model to use."}
234
+ )
235
+ revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."})
236
+ peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"})
237
+ task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"})
238
+ inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"})
239
+
240
+
241
+ @dataclass
242
+ class PromptLearningConfig(PeftConfig):
243
+ """
244
+ This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or
245
+ [`PromptTuning`].
246
+
247
+ Args:
248
+ num_virtual_tokens (`int`): The number of virtual tokens to use.
249
+ token_dim (`int`): The hidden embedding dimension of the base transformer model.
250
+ num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.
251
+ num_attention_heads (`int`): The number of attention heads in the base transformer model.
252
+ num_layers (`int`): The number of layers in the base transformer model.
253
+ """
254
+
255
+ num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"})
256
+ token_dim: int = field(
257
+ default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"}
258
+ )
259
+ num_transformer_submodules: Optional[int] = field(
260
+ default=None, metadata={"help": "Number of transformer submodules"}
261
+ )
262
+ num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"})
263
+ num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"})
264
+
265
+ @property
266
+ def is_prompt_learning(self) -> bool:
267
+ r"""
268
+ Utility method to check if the configuration is for prompt learning.
269
+ """
270
+ return True
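The `from_peft_type` dispatch above can be exercised with exactly the round-trip described in its comment; a minimal sketch:

```python
from peft import LoraConfig, PeftConfig

cfg = LoraConfig(r=16)
cfg.save_pretrained("./lora-config")  # writes adapter_config.json

# Thanks to the peft_type dispatch, the base class restores the subclass:
reloaded = PeftConfig.from_pretrained("./lora-config")
assert isinstance(reloaded, LoraConfig)
```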
venv/lib/python3.10/site-packages/peft/helpers.py ADDED
@@ -0,0 +1,113 @@
1
+ import inspect
2
+ from copy import deepcopy
3
+ from functools import update_wrapper
4
+ from types import MethodType
5
+
6
+ from .peft_model import PeftModel
7
+
8
+
9
+ def update_forward_signature(model: PeftModel) -> None:
10
+ """
11
+ Updates the forward signature of the PeftModel to include the parent class's signature.
12
+ Args:
13
+ model (`PeftModel`): Peft model whose forward signature should be updated.
14
+ Example:
15
+
16
+ ```python
17
+ >>> from transformers import WhisperForConditionalGeneration
18
+ >>> from peft import get_peft_model, LoraConfig, update_forward_signature
19
+
20
+ >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
21
+ >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"])
22
+
23
+ >>> peft_model = get_peft_model(model, peft_config)
24
+ >>> update_forward_signature(peft_model)
25
+ ```
26
+ """
27
+
28
+ # Only update signature when the current forward signature only has *args and **kwargs
29
+ current_signature = inspect.signature(model.forward)
30
+ if (
31
+ len(current_signature.parameters) == 2
32
+ and "args" in current_signature.parameters
33
+ and "kwargs" in current_signature.parameters
34
+ ):
35
+ forward = deepcopy(model.forward.__func__)
36
+ update_wrapper(
37
+ forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__")
38
+ )
39
+ model.forward = MethodType(forward, model)
40
+
41
+
42
+ def update_generate_signature(model: PeftModel) -> None:
43
+ """
44
+ Updates the generate signature of a PeftModel with an overridden generate method to include the parent class's signature.
45
+ Args:
46
+ model (`PeftModel`): Peft model whose generate signature should be updated.
47
+ Example:
48
+
49
+ ```python
50
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
51
+ >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature
52
+
53
+ >>> model_name_or_path = "bigscience/mt0-large"
54
+ >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
55
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
56
+
57
+ >>> peft_config = LoraConfig(
58
+ ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
59
+ ... )
60
+ >>> peft_model = get_peft_model(model, peft_config)
61
+ >>> update_generate_signature(peft_model)
62
+ >>> help(peft_model.generate)
63
+ ```
64
+ """
65
+ if not hasattr(model, "generate"):
66
+ return
67
+ current_signature = inspect.signature(model.generate)
68
+ if (
69
+ len(current_signature.parameters) == 2
70
+ and "args" in current_signature.parameters
71
+ and "kwargs" in current_signature.parameters
72
+ ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters):
73
+ generate = deepcopy(model.generate.__func__)
74
+ update_wrapper(
75
+ generate,
76
+ type(model.get_base_model()).generate,
77
+ assigned=("__doc__", "__name__", "__annotations__"),
78
+ )
79
+ model.generate = MethodType(generate, model)
80
+
81
+
82
+ def update_signature(model: PeftModel, method: str = "all") -> None:
83
+ """
84
+ Updates the signature of a PeftModel to include the parent class's signature for the forward or generate method.
85
+ Args:
86
+ model (`PeftModel`): Peft model whose forward or generate signature should be updated.
87
+ method (`str`): which signature to update; choose one of "forward", "generate", "all".
88
+ Example:
89
+ ```python
90
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
91
+ >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature
92
+
93
+ >>> model_name_or_path = "bigscience/mt0-large"
94
+ >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
95
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
96
+
97
+ >>> peft_config = LoraConfig(
98
+ ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
99
+ ... )
100
+ >>> peft_model = get_peft_model(model, peft_config)
101
+ >>> update_signature(peft_model)
102
+ >>> help(peft_model.generate)
103
+ ```
104
+ """
105
+ if method == "forward":
106
+ update_forward_signature(model)
107
+ elif method == "generate":
108
+ update_generate_signature(model)
109
+ elif method == "all":
110
+ update_forward_signature(model)
111
+ update_generate_signature(model)
112
+ else:
113
+ raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
venv/lib/python3.10/site-packages/peft/import_utils.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib
15
+ import importlib.metadata as importlib_metadata
16
+ from functools import lru_cache
17
+
18
+ import packaging.version
19
+
20
+
21
+ def is_bnb_available() -> bool:
22
+ return importlib.util.find_spec("bitsandbytes") is not None
23
+
24
+
25
+ def is_bnb_4bit_available() -> bool:
26
+ if not is_bnb_available():
27
+ return False
28
+
29
+ import bitsandbytes as bnb
30
+
31
+ return hasattr(bnb.nn, "Linear4bit")
32
+
33
+
34
+ def is_auto_gptq_available():
35
+ if importlib.util.find_spec("auto_gptq") is not None:
36
+ AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0")
37
+ version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq"))
38
+ if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq:
39
+ return True
40
+ else:
41
+ raise ImportError(
42
+ f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, "
43
+ f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported"
44
+ )
45
+
46
+
47
+ def is_optimum_available() -> bool:
48
+ return importlib.util.find_spec("optimum") is not None
49
+
50
+
51
+ @lru_cache
52
+ def is_torch_tpu_available(check_device=True):
53
+ "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
54
+ if importlib.util.find_spec("torch_xla") is not None:
55
+ if check_device:
56
+ # We need to check if `xla_device` can be found, will raise a RuntimeError if not
57
+ try:
58
+ import torch_xla.core.xla_model as xm
59
+
60
+ _ = xm.xla_device()
61
+ return True
62
+ except RuntimeError:
63
+ return False
64
+ return True
65
+ return False
66
+
67
+
68
+ def is_aqlm_available():
69
+ return importlib.util.find_spec("aqlm") is not None
70
+
71
+
72
+ def is_auto_awq_available():
73
+ return importlib.util.find_spec("awq") is not None
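These probes exist so that optional backends can be gated without hard imports; a minimal sketch of the intended usage pattern:

```python
from peft.import_utils import is_bnb_4bit_available, is_bnb_available

if is_bnb_available():
    import bitsandbytes as bnb  # safe: the probe confirmed the module exists

    print("4-bit (Linear4bit) support:", is_bnb_4bit_available())
else:
    print("bitsandbytes not installed; skipping quantized code path")
```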
venv/lib/python3.10/site-packages/peft/mapping.py ADDED
@@ -0,0 +1,168 @@
1
+ # Copyright 2023-present the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ from typing import TYPE_CHECKING, Any
18
+
19
+ import torch
20
+
21
+ from .config import PeftConfig
22
+ from .mixed_model import PeftMixedModel
23
+ from .peft_model import (
24
+ PeftModel,
25
+ PeftModelForCausalLM,
26
+ PeftModelForFeatureExtraction,
27
+ PeftModelForQuestionAnswering,
28
+ PeftModelForSeq2SeqLM,
29
+ PeftModelForSequenceClassification,
30
+ PeftModelForTokenClassification,
31
+ )
32
+ from .tuners import (
33
+ AdaLoraConfig,
34
+ AdaLoraModel,
35
+ AdaptionPromptConfig,
36
+ IA3Config,
37
+ IA3Model,
38
+ LoHaConfig,
39
+ LoHaModel,
40
+ LoKrConfig,
41
+ LoKrModel,
42
+ LoraConfig,
43
+ LoraModel,
44
+ MultitaskPromptTuningConfig,
45
+ OFTConfig,
46
+ OFTModel,
47
+ PolyConfig,
48
+ PolyModel,
49
+ PrefixTuningConfig,
50
+ PromptEncoderConfig,
51
+ PromptTuningConfig,
52
+ )
53
+ from .utils import _prepare_prompt_learning_config
54
+
55
+
56
+ if TYPE_CHECKING:
57
+ from transformers import PreTrainedModel
58
+
59
+
60
+ MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, PeftModel] = {
61
+ "SEQ_CLS": PeftModelForSequenceClassification,
62
+ "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM,
63
+ "CAUSAL_LM": PeftModelForCausalLM,
64
+ "TOKEN_CLS": PeftModelForTokenClassification,
65
+ "QUESTION_ANS": PeftModelForQuestionAnswering,
66
+ "FEATURE_EXTRACTION": PeftModelForFeatureExtraction,
67
+ }
68
+
69
+ PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, PeftConfig] = {
70
+ "ADAPTION_PROMPT": AdaptionPromptConfig,
71
+ "PROMPT_TUNING": PromptTuningConfig,
72
+ "PREFIX_TUNING": PrefixTuningConfig,
73
+ "P_TUNING": PromptEncoderConfig,
74
+ "LORA": LoraConfig,
75
+ "LOHA": LoHaConfig,
76
+ "LOKR": LoKrConfig,
77
+ "ADALORA": AdaLoraConfig,
78
+ "IA3": IA3Config,
79
+ "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig,
80
+ "OFT": OFTConfig,
81
+ "POLY": PolyConfig,
82
+ }
83
+
84
+ PEFT_TYPE_TO_TUNER_MAPPING = {
85
+ "LORA": LoraModel,
86
+ "LOHA": LoHaModel,
87
+ "LOKR": LoKrModel,
88
+ "ADALORA": AdaLoraModel,
89
+ "IA3": IA3Model,
90
+ "OFT": OFTModel,
91
+ "POLY": PolyModel,
92
+ }
93
+
94
+
95
+ def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig:
96
+ """
97
+ Returns a Peft config object from a dictionary.
98
+
99
+ Args:
100
+ config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters.
101
+ """
102
+
103
+ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict)
104
+
105
+
106
+ def get_peft_model(
107
+ model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False
108
+ ) -> PeftModel | PeftMixedModel:
109
+ """
110
+ Returns a Peft model object from a model and a config.
111
+
112
+ Args:
113
+ model ([`transformers.PreTrainedModel`]):
114
+ Model to be wrapped.
115
+ peft_config ([`PeftConfig`]):
116
+ Configuration object containing the parameters of the Peft model.
117
+ adapter_name (`str`, `optional`, defaults to `"default"`):
118
+ The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
119
+ mixed (`bool`, `optional`, defaults to `False`):
120
+ Whether to allow mixing different (compatible) adapter types.
121
+ """
122
+ model_config = getattr(model, "config", {"model_type": "custom"})
123
+ if hasattr(model_config, "to_dict"):
124
+ model_config = model_config.to_dict()
125
+
126
+ peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
127
+
128
+ if mixed:
129
+ return PeftMixedModel(model, peft_config, adapter_name=adapter_name)
130
+
131
+ if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
132
+ return PeftModel(model, peft_config, adapter_name=adapter_name)
133
+
134
+ if peft_config.is_prompt_learning:
135
+ peft_config = _prepare_prompt_learning_config(peft_config, model_config)
136
+ return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)
137
+
138
+
139
+ def inject_adapter_in_model(
140
+ peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default"
141
+ ) -> torch.nn.Module:
142
+ r"""
143
+ A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning
144
+ methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API
145
+ calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods.
146
+
147
+ Args:
148
+ peft_config (`PeftConfig`):
149
+ Configuration object containing the parameters of the Peft model.
150
+ model (`torch.nn.Module`):
151
+ The input model where the adapter will be injected.
152
+ adapter_name (`str`, `optional`, defaults to `"default"`):
153
+ The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
154
+ """
155
+ if peft_config.is_prompt_learning or peft_config.is_adaption_prompt:
156
+ raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.")
157
+
158
+ if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys():
159
+ raise ValueError(
160
+ f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`."
161
+ )
162
+
163
+ tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type]
164
+
165
+ # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules.
166
+ peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name)
167
+
168
+ return peft_model.model
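
Taken together, `get_peft_model` wraps the base model in a `PeftModel` subclass chosen via `MODEL_TYPE_TO_PEFT_MODEL_MAPPING`, while `inject_adapter_in_model` mutates the base model and hands it back unwrapped. A minimal sketch of the difference (not part of the committed file; the `facebook/opt-125m` checkpoint is assumed purely for illustration):

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model, inject_adapter_in_model

# assumed base checkpoint; any causal LM works
base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = LoraConfig(task_type="CAUSAL_LM", r=8, target_modules=["q_proj", "v_proj"])

# get_peft_model returns a PeftModel wrapper (here PeftModelForCausalLM)
peft_model = get_peft_model(base, config)
peft_model.print_trainable_parameters()

# inject_adapter_in_model returns the (modified) base model itself,
# with freshly initialized LoRA layers injected into q_proj/v_proj
base2 = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
plain_model = inject_adapter_in_model(config, base2)
```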
venv/lib/python3.10/site-packages/peft/mixed_model.py ADDED
@@ -0,0 +1,409 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from __future__ import annotations
+
+ import os
+ from contextlib import contextmanager
+ from typing import Any, Optional, Union
+
+ import torch
+ from accelerate.hooks import remove_hook_from_submodules
+ from torch import nn
+ from transformers.utils import PushToHubMixin
+
+ from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES
+
+ from .config import PeftConfig
+ from .peft_model import PeftModel
+ from .tuners import (
+     AdaLoraModel,
+     IA3Model,
+     LoHaModel,
+     LoKrModel,
+     LoraModel,
+     MixedModel,
+     OFTModel,
+ )
+ from .utils import PeftType, _set_adapter, _set_trainable
+
+
+ PEFT_TYPE_TO_MODEL_MAPPING = {
+     PeftType.LORA: LoraModel,
+     PeftType.LOHA: LoHaModel,
+     PeftType.LOKR: LoKrModel,
+     PeftType.ADALORA: AdaLoraModel,
+     PeftType.IA3: IA3Model,
+     PeftType.OFT: OFTModel,
+ }
+
+
+ def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None:
+     r"""
+     Prepares the model for gradient checkpointing if necessary
+     """
+     # Note: same as PeftModel._prepare_model_for_gradient_checkpointing
+     if not getattr(model, "is_gradient_checkpointing", True):
+         return model
+
+     if not (
+         getattr(model, "is_loaded_in_8bit", False)
+         or getattr(model, "is_loaded_in_4bit", False)
+         or getattr(model, "is_quantized", False)
+     ):
+         if hasattr(model, "enable_input_require_grads"):
+             model.enable_input_require_grads()
+         elif hasattr(model, "get_input_embeddings"):
+
+             def make_inputs_require_grad(module, input, output):
+                 output.requires_grad_(True)
+
+             model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+
+ def _check_config_compatible(peft_config: PeftConfig) -> None:
+     if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES:
+         raise ValueError(
+             f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. "
+             f"Compatible types are: {COMPATIBLE_TUNER_TYPES}"
+         )
+
+
+ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
+     """
+     PeftMixedModel for loading and mixing different types of adapters for inference.
+
+     This class does not support saving, and it shouldn't usually be initialized directly. Instead, use
+     `get_peft_model` with the argument `mixed=True`.
+
+     <Tip>
+
+     Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn
+     more about using different adapter types.
+
+     </Tip>
+
+     Example:
+
+     ```py
+     >>> from peft import PeftMixedModel
+
+     >>> base_model = ...  # load the base model, e.g. from transformers
+     >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval()
+     >>> peft_model.load_adapter(path_to_adapter2, "adapter2")
+     >>> peft_model.set_adapter(["adapter1", "adapter2"])  # activate both adapters
+     >>> peft_model(data)  # forward pass using both adapters
+     ```
+
+     Args:
+         model (`torch.nn.Module`):
+             The model to be tuned.
+         config (`PeftConfig`):
+             The config of the model to be tuned. The adapter type must be compatible.
+         adapter_name (`str`, `optional`, defaults to `"default"`):
+             The name of the first adapter.
+     """
+
+     def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
+         super().__init__()
+         _check_config_compatible(peft_config)
+         _prepare_model_for_gradient_checkpointing(model)
+         self.modules_to_save = None
+         self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name)
+         self.set_modules_to_save(peft_config, adapter_name)
+
+         self.config = getattr(model, "config", {"model_type": "custom"})
+
+         # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
+         # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
+         # behavior we disable that in this line.
+         if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
+             self.base_model.config.pretraining_tp = 1
+
+     @property
+     def peft_config(self) -> dict[str, PeftConfig]:
+         return self.base_model.peft_config
+
+     @property
+     def active_adapter(self) -> str:
+         return self.base_model.active_adapter
+
+     @property
+     def active_adapters(self) -> list[str]:
+         return self.base_model.active_adapters
+
+     def get_nb_trainable_parameters(self):
+         r"""
+         Returns the number of trainable parameters and number of all parameters in the model.
+         """
+         # note: same as PeftModel.get_nb_trainable_parameters
+         trainable_params = 0
+         all_param = 0
+         for _, param in self.named_parameters():
+             num_params = param.numel()
+             # if using DS Zero 3 and the weights are initialized empty
+             if num_params == 0 and hasattr(param, "ds_numel"):
+                 num_params = param.ds_numel
+
+             # Due to the design of 4bit linear layers from bitsandbytes
+             # one needs to multiply the number of parameters by 2 to get
+             # the correct number of parameters
+             if param.__class__.__name__ == "Params4bit":
+                 num_params = num_params * 2
+
+             all_param += num_params
+             if param.requires_grad:
+                 trainable_params += num_params
+
+         return trainable_params, all_param
+
+     def print_trainable_parameters(self):
+         """
+         Prints the number of trainable parameters in the model.
+
+         Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from
+         num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
+         (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model.
+         For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for
+         prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number
+         of trainable parameters of the backbone transformer model which can be different.
+         """
+         # note: same as PeftModel.print_trainable_parameters
+         trainable_params, all_param = self.get_nb_trainable_parameters()
+
+         print(
+             f"trainable params: {trainable_params:,d} || "
+             f"all params: {all_param:,d} || "
+             f"trainable%: {100 * trainable_params / all_param:.4f}"
+         )
+
+     def __getattr__(self, name: str):
+         """Forward missing attributes to the wrapped module."""
+         try:
+             return super().__getattr__(name)  # defer to nn.Module's logic
+         except AttributeError:
+             return getattr(self.base_model, name)
+
+     def forward(self, *args: Any, **kwargs: Any):
+         """
+         Forward pass of the model.
+         """
+         return self.base_model(*args, **kwargs)
+
+     def generate(self, *args: Any, **kwargs: Any):
+         """
+         Generate output.
+         """
+         return self.base_model.generate(*args, **kwargs)
+
+     @contextmanager
+     def disable_adapter(self):
+         """
+         Disables the adapter module.
+         """
+         try:
+             self.base_model.disable_adapter_layers()
+             yield
+         finally:
+             self.base_model.enable_adapter_layers()
+
+     def add_adapter(self, adapter_name: str, peft_config: PeftConfig):
+         _check_config_compatible(peft_config)
+
+         try:
+             self.peft_config[adapter_name] = peft_config
+             self.base_model.inject_adapter(self, adapter_name)
+         except Exception:  # something went wrong, roll back
+             if adapter_name in self.peft_config:
+                 del self.peft_config[adapter_name]
+             raise
+
+         self.set_modules_to_save(peft_config, adapter_name)
+
+     def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None:
+         if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None:
+             return
+
+         if self.modules_to_save is None:
+             self.modules_to_save = set(modules_to_save)
+         else:
+             self.modules_to_save.update(modules_to_save)
+         _set_trainable(self, adapter_name)
+
+     def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
+         """
+         Sets the active adapter(s) for the model.
+
+         Note that the order in which the adapters are applied during the forward pass may not be the same as the order
+         in which they are passed to this function. Instead, the order during the forward pass is determined by the
+         order in which the adapters were loaded into the model. The active adapters only determine which adapters are
+         active during the forward pass, but not the order in which they are applied.
+
+         Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
+         not desired, use the following code.
+
+         ```py
+         >>> for name, param in model_peft.named_parameters():
+         ...     if ...:  # some check on name (ex. if 'lora' in name)
+         ...         param.requires_grad = False
+         ```
+
+         Args:
+             adapter_name (`str` or `List[str]`):
+                 The name of the adapter(s) to be activated.
+         """
+         if isinstance(adapter_name, str):
+             adapter_name = [adapter_name]
+
+         mismatched = set(adapter_name) - set(self.peft_config.keys())
+         if mismatched:
+             raise ValueError(
+                 f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
+             )
+
+         self.base_model.set_adapter(adapter_name)
+         _set_adapter(self, adapter_name)
+
+     def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
+         if isinstance(adapter_name, str):
+             adapter_name = [adapter_name]
+
+         mismatched = set(adapter_name) - set(self.peft_config.keys())
+         if mismatched:
+             raise ValueError(
+                 f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
+             )
+
+         self.base_model.delete_adapter(adapter_name)
+
+     def merge_and_unload(self, *args: Any, **kwargs: Any):
+         r"""
+         This method merges the adapter layers into the base model. This is needed if someone wants to use the base
+         model as a standalone model.
+
+         Args:
+             progressbar (`bool`):
+                 whether to show a progressbar indicating the unload and merge process
+             safe_merge (`bool`):
+                 whether to activate the safe merging check to check if there is any potential Nan in the adapter
+                 weights
+             adapter_names (`List[str]`, *optional*):
+                 The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
+                 to `None`.
+         """
+         return self.base_model.merge_and_unload(*args, **kwargs)
+
+     def unload(self, *args: Any, **kwargs: Any):
+         """
+         Gets back the base model by removing all the adapter modules without merging. This gives back the original base
+         model.
+         """
+         return self.base_model.unload(*args, **kwargs)
+
+     @classmethod
+     def _split_kwargs(cls, kwargs: dict[str, Any]):
+         return PeftModel._split_kwargs(kwargs)
+
+     def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any):
+         output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs)
+         # TODO: not quite clear why this is necessary but tests fail without it
+         self.set_adapter(self.active_adapters)
+         return output
+
+     def create_or_update_model_card(self, output_dir: str):
+         raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).")
+
+     def save_pretrained(
+         self,
+         save_directory: str,
+         safe_serialization: bool = False,
+         selected_adapters: Optional[list[str]] = None,
+         **kwargs: Any,
+     ):
+         raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).")
+
+     @classmethod
+     def from_pretrained(
+         cls,
+         model: nn.Module,
+         model_id: str | os.PathLike,
+         adapter_name: str = "default",
+         is_trainable: bool = False,
+         config: Optional[PeftConfig] = None,
+         **kwargs: Any,
+     ):
+         r"""
+         Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights.
+
+         Note that the passed `model` may be modified in place.
+
+         Args:
+             model (`nn.Module`):
+                 The model to be adapted.
+             model_id (`str` or `os.PathLike`):
+                 The name of the PEFT configuration to use. Can be either:
+                     - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
+                       Hub.
+                     - A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
+                       method (`./my_peft_config_directory/`).
+             adapter_name (`str`, *optional*, defaults to `"default"`):
+                 The name of the adapter to be loaded. This is useful for loading multiple adapters.
+             is_trainable (`bool`, *optional*, defaults to `False`):
+                 Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and used for
+                 inference.
+             config ([`~peft.PeftConfig`], *optional*):
+                 The configuration object to use instead of an automatically loaded configuration. This configuration
+                 object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
+                 loaded before calling `from_pretrained`.
+             kwargs: (`optional`):
+                 Additional keyword arguments passed along to the specific PEFT configuration class.
+         """
+         # note: adapted from PeftModel.from_pretrained
+         from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
+
+         # load the config
+         if config is None:
+             config = PEFT_TYPE_TO_CONFIG_MAPPING[
+                 PeftConfig._get_peft_type(
+                     model_id,
+                     subfolder=kwargs.get("subfolder", None),
+                     revision=kwargs.get("revision", None),
+                     cache_dir=kwargs.get("cache_dir", None),
+                     use_auth_token=kwargs.get("use_auth_token", None),
+                 )
+             ].from_pretrained(model_id, **kwargs)
+         elif isinstance(config, PeftConfig):
+             config.inference_mode = not is_trainable
+         else:
+             raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
+
+         # note: this is different from PeftModel.from_pretrained
+         if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING:
+             raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.")
+
+         if (getattr(model, "hf_device_map", None) is not None) and len(
+             set(model.hf_device_map.values()).intersection({"cpu", "disk"})
+         ) > 0:
+             remove_hook_from_submodules(model)
+
+         if config.is_prompt_learning and is_trainable:
+             # note: should not be possible to reach, but just in case
+             raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
+         else:
+             config.inference_mode = not is_trainable
+
+         # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel
+         model = cls(model, config, adapter_name)
+         model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
+         return model
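
The `from_pretrained` path above always returns a `PeftMixedModel`, which is how compatible adapter types get stacked for inference. A minimal sketch of that workflow (not part of the committed file; the checkpoint name and adapter paths are placeholders):

```py
from transformers import AutoModelForCausalLM
from peft import PeftMixedModel

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # assumed base model

# load a first adapter, then stack a second, compatible adapter type on top
model = PeftMixedModel.from_pretrained(base, "path/to/lora_adapter", adapter_name="lora").eval()
model.load_adapter("path/to/loha_adapter", adapter_name="loha")
model.set_adapter(["lora", "loha"])  # both adapters are active in the forward pass
```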
venv/lib/python3.10/site-packages/peft/peft_model.py ADDED
@@ -0,0 +1,1986 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from __future__ import annotations
+
+ import collections
+ import inspect
+ import os
+ import warnings
+ from contextlib import contextmanager
+ from copy import deepcopy
+ from typing import Any, Optional, Union
+
+ import packaging.version
+ import torch
+ import transformers
+ from accelerate import dispatch_model, infer_auto_device_map
+ from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
+ from accelerate.utils import get_balanced_memory
+ from huggingface_hub import ModelCard, ModelCardData, hf_hub_download
+ from safetensors.torch import save_file as safe_save_file
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from transformers import PreTrainedModel
+ from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
+ from transformers.utils import PushToHubMixin
+
+ from . import __version__
+ from .config import PeftConfig
+ from .tuners import (
+     AdaLoraModel,
+     AdaptionPromptModel,
+     IA3Model,
+     LoHaModel,
+     LoKrModel,
+     LoraModel,
+     MultitaskPromptEmbedding,
+     OFTModel,
+     PolyModel,
+     PrefixEncoder,
+     PromptEmbedding,
+     PromptEncoder,
+ )
+ from .utils import (
+     SAFETENSORS_WEIGHTS_NAME,
+     TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
+     WEIGHTS_NAME,
+     PeftType,
+     TaskType,
+     _get_batch_size,
+     _prepare_prompt_learning_config,
+     _set_adapter,
+     _set_trainable,
+     get_peft_model_state_dict,
+     id_tensor_storage,
+     infer_device,
+     load_peft_weights,
+     set_peft_model_state_dict,
+     shift_tokens_right,
+ )
+
+
+ PEFT_TYPE_TO_MODEL_MAPPING = {
+     PeftType.LORA: LoraModel,
+     PeftType.LOHA: LoHaModel,
+     PeftType.LOKR: LoKrModel,
+     PeftType.PROMPT_TUNING: PromptEmbedding,
+     PeftType.P_TUNING: PromptEncoder,
+     PeftType.PREFIX_TUNING: PrefixEncoder,
+     PeftType.ADALORA: AdaLoraModel,
+     PeftType.ADAPTION_PROMPT: AdaptionPromptModel,
+     PeftType.IA3: IA3Model,
+     PeftType.OFT: OFTModel,
+     PeftType.POLY: PolyModel,
+ }
+
+
+ class PeftModel(PushToHubMixin, torch.nn.Module):
+     """
+     Base model encompassing various Peft methods.
+
+     Args:
+         model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.
+         peft_config ([`PeftConfig`]): The configuration of the Peft model.
+         adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
+
+     **Attributes**:
+         - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft.
+         - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.
+         - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when
+             saving the model.
+         - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if
+             using [`PromptLearningConfig`].
+         - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if
+             using [`PromptLearningConfig`].
+         - **transformer_backbone_name** (`str`) -- The name of the transformer
+             backbone in the base model if using [`PromptLearningConfig`].
+         - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone
+             in the base model if using [`PromptLearningConfig`].
+     """
+
+     def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None:
+         super().__init__()
+         self.modules_to_save = None
+         self.active_adapter = adapter_name
+         self.peft_type = peft_config.peft_type
+         # These args are special PEFT arguments that users can pass. They need to be removed before passing them to
+         # forward.
+         self.special_peft_forward_args = {"adapter_names"}
+
+         self._is_prompt_learning = peft_config.is_prompt_learning
+         if self._is_prompt_learning:
+             self._peft_config = {adapter_name: peft_config}
+             self.base_model = model
+             self.add_adapter(adapter_name, peft_config)
+         else:
+             self._peft_config = None
+             cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]
+             self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
+             self.set_additional_trainable_modules(peft_config, adapter_name)
+
+         if getattr(model, "is_gradient_checkpointing", True):
+             model = self._prepare_model_for_gradient_checkpointing(model)
+
+         # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
+         # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
+         # behavior we disable that in this line.
+         if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
+             self.base_model.config.pretraining_tp = 1
+
+     @property
+     def peft_config(self) -> dict[str, PeftConfig]:
+         if self._is_prompt_learning:
+             return self._peft_config
+         return self.base_model.peft_config
+
+     @property
+     def active_adapters(self) -> list[str]:
+         try:
+             adapters = self.base_model.active_adapters
+         except AttributeError:
+             adapters = self.active_adapter
+             if isinstance(adapters, str):
+                 adapters = [adapters]
+         return adapters
+
+     @peft_config.setter
+     def peft_config(self, value: dict[str, PeftConfig]):
+         if self._is_prompt_learning:
+             self._peft_config = value
+         else:
+             self.base_model.peft_config = value
+
+     def save_pretrained(
+         self,
+         save_directory: str,
+         safe_serialization: bool = True,
+         selected_adapters: Optional[list[str]] = None,
+         save_embedding_layers: Union[str, bool] = "auto",
+         is_main_process: bool = True,
+         **kwargs: Any,
+     ) -> None:
+ r"""
174
+ This function saves the adapter model and the adapter configuration files to a directory, so that it can be
175
+ reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`]
176
+ method.
177
+
178
+ Args:
179
+ save_directory (`str`):
180
+ Directory where the adapter model and configuration files will be saved (will be created if it does not
181
+ exist).
182
+ safe_serialization (`bool`, *optional*):
183
+ Whether to save the adapter files in safetensors format, defaults to `True`.
184
+ selected_adapters (`List[str]`, *optional*):
185
+ A list of adapters to be saved. If `None`, will default to all adapters.
186
+ save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`):
187
+ If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common
188
+ embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available.
189
+ and automatically sets the boolean flag. This only works for 🤗 transformers models.
190
+ is_main_process (`bool`, *optional*):
191
+ Whether the process calling this is the main process or not. Will default to `True`. Will not save the
192
+ checkpoint if not on the main process, which is important for multi device setups (e.g. DDP).
193
+ kwargs (additional keyword arguments, *optional*):
194
+ Additional keyword arguments passed along to the `push_to_hub` method.
195
+ """
196
+         if os.path.isfile(save_directory):
+             raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
+
+         if selected_adapters is None:
+             selected_adapters = list(self.peft_config.keys())
+         else:
+             if any(
+                 selected_adapter_name not in list(self.peft_config.keys())
+                 for selected_adapter_name in selected_adapters
+             ):
+                 raise ValueError(
+                     f"You passed an invalid `selected_adapters` argument, current supported adapter names are"
+                     f" {list(self.peft_config.keys())} - got {selected_adapters}."
+                 )
+
+         if is_main_process:
+             os.makedirs(save_directory, exist_ok=True)
+             self.create_or_update_model_card(save_directory)
+
+         for adapter_name in selected_adapters:
+             peft_config = self.peft_config[adapter_name]
+             # save only the trainable weights
+             output_state_dict = get_peft_model_state_dict(
+                 self,
+                 state_dict=kwargs.get("state_dict", None),
+                 adapter_name=adapter_name,
+                 save_embedding_layers=save_embedding_layers,
+             )
+             output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory
+             os.makedirs(output_dir, exist_ok=True)
+
+             if is_main_process and safe_serialization:
+                 # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134
+                 # Safetensors does not allow tensor aliasing.
+                 # We're going to remove aliases before saving
+                 ptrs = collections.defaultdict(list)
+                 for name, tensor in output_state_dict.items():
+                     # Sometimes in the state_dict we have non-tensor objects.
+                     # e.g. in bitsandbytes we have some `str` objects in the state_dict
+                     if isinstance(tensor, torch.Tensor):
+                         ptrs[id_tensor_storage(tensor)].append(name)
+                     else:
+                         # In the non-tensor case, fall back to the pointer of the object itself
+                         ptrs[id(tensor)].append(name)
+
+                 # These are all the pointers of shared tensors.
+                 shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
+
+                 for _, names in shared_ptrs.items():
+                     # Here we just clone the shared tensors to avoid tensor aliasing which is
+                     # not supported in safetensors.
+                     for shared_tensor_name in names[1:]:
+                         output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone()
+
+                 safe_save_file(
+                     output_state_dict,
+                     os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME),
+                     metadata={"format": "pt"},
+                 )
+             elif is_main_process:
+                 torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
+
+             # save the config and change the inference mode to `True`
+             if peft_config.base_model_name_or_path is None:
+                 peft_config.base_model_name_or_path = (
+                     self.base_model.__dict__.get("name_or_path", None)
+                     if peft_config.is_prompt_learning
+                     else self.base_model.model.__dict__.get("name_or_path", None)
+                 )
+             inference_mode = peft_config.inference_mode
+             peft_config.inference_mode = True
+
+             if peft_config.task_type is None:
+                 # deal with auto mapping
+                 base_model_class = self._get_base_model_class(
+                     is_prompt_tuning=peft_config.is_prompt_learning,
+                 )
+                 parent_library = base_model_class.__module__
+
+                 auto_mapping_dict = {
+                     "base_model_class": base_model_class.__name__,
+                     "parent_library": parent_library,
+                 }
+             else:
+                 auto_mapping_dict = None
+
+             if is_main_process:
+                 peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict)
+             peft_config.inference_mode = inference_mode
+
+     @classmethod
+     def from_pretrained(
+         cls,
+         model: torch.nn.Module,
+         model_id: Union[str, os.PathLike],
+         adapter_name: str = "default",
+         is_trainable: bool = False,
+         config: Optional[PeftConfig] = None,
+         **kwargs: Any,
+     ) -> PeftModel:
+         r"""
+         Instantiate a PEFT model from a pretrained model and loaded PEFT weights.
+
+         Note that the passed `model` may be modified in place.
+
+         Args:
+             model ([`torch.nn.Module`]):
+                 The model to be adapted. For 🤗 Transformers models, the model should be initialized with the
+                 [`~transformers.PreTrainedModel.from_pretrained`].
+             model_id (`str` or `os.PathLike`):
+                 The name of the PEFT configuration to use. Can be either:
+                     - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
+                       Hub.
+                     - A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
+                       method (`./my_peft_config_directory/`).
+             adapter_name (`str`, *optional*, defaults to `"default"`):
+                 The name of the adapter to be loaded. This is useful for loading multiple adapters.
+             is_trainable (`bool`, *optional*, defaults to `False`):
+                 Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
+                 used for inference.
+             config ([`~peft.PeftConfig`], *optional*):
+                 The configuration object to use instead of an automatically loaded configuration. This configuration
+                 object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
+                 loaded before calling `from_pretrained`.
+             kwargs: (`optional`):
+                 Additional keyword arguments passed along to the specific PEFT configuration class.
+         """
+         from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
+
+         # load the config
+         if config is None:
+             config = PEFT_TYPE_TO_CONFIG_MAPPING[
+                 PeftConfig._get_peft_type(
+                     model_id,
+                     subfolder=kwargs.get("subfolder", None),
+                     revision=kwargs.get("revision", None),
+                     cache_dir=kwargs.get("cache_dir", None),
+                     use_auth_token=kwargs.get("use_auth_token", None),
+                     token=kwargs.get("token", None),
+                 )
+             ].from_pretrained(model_id, **kwargs)
+         elif isinstance(config, PeftConfig):
+             config.inference_mode = not is_trainable
+         else:
+             raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
+
+         if (getattr(model, "hf_device_map", None) is not None) and len(
+             set(model.hf_device_map.values()).intersection({"cpu", "disk"})
+         ) > 0:
+             remove_hook_from_submodules(model)
+
+         if config.is_prompt_learning and is_trainable:
+             raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
+         else:
+             config.inference_mode = not is_trainable
+
+         if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():
+             model = cls(model, config, adapter_name)
+         else:
+             model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)
+         model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
+         return model
+
+     def _setup_prompt_encoder(self, adapter_name: str):
+         config = self.peft_config[adapter_name]
+         if not hasattr(self, "prompt_encoder"):
+             self.prompt_encoder = torch.nn.ModuleDict({})
+             self.prompt_tokens = {}
+         transformer_backbone = None
+         for name, module in self.base_model.named_children():
+             for param in module.parameters():
+                 param.requires_grad = False
+             if isinstance(module, PreTrainedModel):
+                 # Make sure to freeze the Transformers model
+                 if transformer_backbone is None:
+                     transformer_backbone = module
+                     self.transformer_backbone_name = name
+         if transformer_backbone is None:
+             transformer_backbone = self.base_model
+
+         if config.num_transformer_submodules is None:
+             config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
+
+         for named_param, value in list(transformer_backbone.named_parameters()):
+             # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0]
+             # the actual unsharded shape is stored in "ds_shape" attribute
+             # special handling is needed in case the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig
+             # has been called before
+             # For reference refer to issue: https://github.com/huggingface/peft/issues/996
+             deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None)
+
+             if value.shape[0] == self.base_model.config.vocab_size or (
+                 deepspeed_distributed_tensor_shape is not None
+                 and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size
+             ):
+                 self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", ""))
+                 break
+
+         if config.peft_type == PeftType.PROMPT_TUNING:
+             prompt_encoder = PromptEmbedding(config, self.word_embeddings)
+         elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
+             prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings)
+         elif config.peft_type == PeftType.P_TUNING:
+             prompt_encoder = PromptEncoder(config)
+         elif config.peft_type == PeftType.PREFIX_TUNING:
+             prompt_encoder = PrefixEncoder(config)
+         else:
+             raise ValueError("Not supported")
+
+         prompt_encoder = prompt_encoder.to(self.device)
+         self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))
+         self.prompt_tokens[adapter_name] = torch.arange(
+             config.num_virtual_tokens * config.num_transformer_submodules
+         ).long()
+
+     def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel):
+         r"""
+         Prepares the model for gradient checkpointing if necessary
+         """
+         if not (
+             getattr(model, "is_loaded_in_8bit", False)
+             or getattr(model, "is_loaded_in_4bit", False)
+             or getattr(model, "is_quantized", False)
+         ):
+             if hasattr(model, "enable_input_require_grads"):
+                 model.enable_input_require_grads()
+             elif hasattr(model, "get_input_embeddings"):
+
+                 def make_inputs_require_grad(module, input, output):
+                     output.requires_grad_(True)
+
+                 model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+         return model
+
+     def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor:
+         """
+         Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning
+         method.
+         """
+         prompt_encoder = self.prompt_encoder[adapter_name]
+         prompt_tokens = (
+             self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device)
+         )
+         if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:
+             prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]
+
+         if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING:
+             prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens)
+         else:
+             prompt_embeddings = prompt_encoder(prompt_tokens)
+
+         return prompt_embeddings[0].detach().cpu()
+
+     def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor:
+         """
+         Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method.
+         """
+         peft_config = self.active_peft_config
+         prompt_encoder = self.prompt_encoder[self.active_adapter]
+         prompt_tokens = (
+             self.prompt_tokens[self.active_adapter]
+             .unsqueeze(0)
+             .expand(batch_size, -1)
+             .to(prompt_encoder.embedding.weight.device)
+         )
+         if peft_config.peft_type == PeftType.PREFIX_TUNING:
+             prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]
+             if peft_config.inference_mode:
+                 past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
+             else:
+                 past_key_values = prompt_encoder(prompt_tokens)
+             if self.base_model_torch_dtype is not None:
+                 past_key_values = past_key_values.to(self.base_model_torch_dtype)
+             past_key_values = past_key_values.view(
+                 batch_size,
+                 peft_config.num_virtual_tokens,
+                 peft_config.num_layers * 2,
+                 peft_config.num_attention_heads,
+                 peft_config.token_dim // peft_config.num_attention_heads,
+             )
+             if peft_config.num_transformer_submodules == 2:
+                 past_key_values = torch.cat([past_key_values, past_key_values], dim=2)
+             past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(
+                 peft_config.num_transformer_submodules * 2
+             )
+             if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:
+                 post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]
+                 past_key_values = post_process_fn(past_key_values)
+             return past_key_values
+         else:
+             if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
+                 prompts = prompt_encoder(prompt_tokens, task_ids)
+             else:
+                 if peft_config.inference_mode:
+                     prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
+                 else:
+                     prompts = prompt_encoder(prompt_tokens)
+             return prompts
+
+     def get_nb_trainable_parameters(self) -> tuple[int, int]:
+         r"""
+         Returns the number of trainable parameters and the number of all parameters in the model.
+         """
+         trainable_params = 0
+         all_param = 0
+         for _, param in self.named_parameters():
+             num_params = param.numel()
+             # if using DS Zero 3 and the weights are initialized empty
+             if num_params == 0 and hasattr(param, "ds_numel"):
+                 num_params = param.ds_numel
+
+             # Due to the design of 4bit linear layers from bitsandbytes, one needs to multiply the number of
+             # parameters by 2 (times the itemsize of the quant storage) to get the correct number of parameters
+             if param.__class__.__name__ == "Params4bit":
+                 num_bytes = param.quant_storage.itemsize if hasattr(param, "quant_storage") else 1
+                 num_params = num_params * 2 * num_bytes
+
+             all_param += num_params
+             if param.requires_grad:
+                 trainable_params += num_params
+
+         return trainable_params, all_param
+
+     def print_trainable_parameters(self) -> None:
+         """
+         Prints the number of trainable parameters in the model.
+
+         Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from
+         num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
+         (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model.
+         For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for
+         prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number
+         of trainable parameters of the backbone transformer model which can be different.
+         """
+         trainable_params, all_param = self.get_nb_trainable_parameters()
+
+         print(
+             f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}"
+         )
+
+     def __getattr__(self, name: str):
+         """Forward missing attributes to the wrapped module."""
+         try:
+             return super().__getattr__(name)  # defer to nn.Module's logic
+         except AttributeError:
+             return getattr(self.base_model, name)
+
+     @contextmanager
+     def _enable_peft_forward_hooks(self, *args, **kwargs):
+         # If the base model has a method called _enable_peft_forward_hooks, it is invoked as a context manager.
+         # Otherwise, this runs without any changes
+         if hasattr(self.base_model, "_enable_peft_forward_hooks"):
+             with self.base_model._enable_peft_forward_hooks(*args, **kwargs):
+                 yield
+             return
+         else:
+             # nothing to enable
+             yield
+             return
+
+     def forward(self, *args: Any, **kwargs: Any):
+         """
+         Forward pass of the model.
+         """
+         with self._enable_peft_forward_hooks(*args, **kwargs):
+             kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
+             return self.get_base_model()(*args, **kwargs)
+
+     def generate(self, *args, **kwargs):
+         with self._enable_peft_forward_hooks(*args, **kwargs):
+             kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
+             return self.get_base_model().generate(*args, **kwargs)
+
+     def _get_base_model_class(self, is_prompt_tuning=False):
+         """
+         Returns the base model class.
+         """
+         if not is_prompt_tuning:
+             return self.base_model.model.__class__
+         return self.base_model.__class__
+
+     @contextmanager
+     def disable_adapter(self):
+         """
+         Context manager that disables the adapter module. Use this to run inference on the base model.
+
+         Example:
+
+         ```py
+         >>> with model.disable_adapter():
+         ...     model(inputs)
+         ```
+         """
+         try:
+             if self.peft_config[self.active_adapter].is_prompt_learning:
+                 # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and
+                 # letting the underlying methods deal with it, same as how LoRA does it.
+                 old_forward = self.forward
+                 self.forward = self.base_model.forward
+                 old_prepare_inputs_for_generation = self.prepare_inputs_for_generation
+                 self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
+             else:
+                 self.base_model.disable_adapter_layers()
+             yield
+         finally:
+             if self.peft_config[self.active_adapter].is_prompt_learning:
+                 self.forward = old_forward
+                 self.prepare_inputs_for_generation = old_prepare_inputs_for_generation
+             else:
+                 self.base_model.enable_adapter_layers()
+
+     def get_base_model(self) -> torch.nn.Module:
+         """
+         Returns the base model.
+         """
+         return (
+             self.base_model
+             if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY)
+             else self.base_model.model
+         )
+
+     def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
+         """
+         Add an adapter to the model based on the passed configuration.
+
+         This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
+
+         The name for the new adapter should be unique.
+
+         The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
+         adapter.
+
+         Args:
+             adapter_name (`str`):
+                 The name of the adapter to be added.
+             peft_config ([`PeftConfig`]):
+                 The configuration of the adapter to be added.
+         """
+         if peft_config.peft_type != self.peft_type:
+             raise ValueError(
+                 f"Cannot combine adapters with different peft types. "
+                 f"Found {self.peft_type} and {peft_config.peft_type}."
+             )
+
+         try:
+             if peft_config.is_prompt_learning:
+                 self.peft_config[adapter_name] = peft_config
+                 if hasattr(self.config, "to_dict"):
+                     dict_config = self.config.to_dict()
+                 else:
+                     dict_config = self.config
+
+                 peft_config = _prepare_prompt_learning_config(peft_config, dict_config)
+                 self._setup_prompt_encoder(adapter_name)
+             elif peft_config.is_adaption_prompt:
+                 self.base_model.add_adapter(adapter_name, peft_config)
+             else:
+                 self.peft_config[adapter_name] = peft_config
+                 self.base_model.inject_adapter(self.base_model.model, adapter_name)
+         except Exception:  # something went wrong, roll back
+             if adapter_name in self.peft_config:
+                 del self.peft_config[adapter_name]
+             raise
+
+         self.set_additional_trainable_modules(peft_config, adapter_name)
+
+     def set_additional_trainable_modules(self, peft_config, adapter_name):
+         if getattr(peft_config, "modules_to_save", None) is not None:
+             if self.modules_to_save is None:
+                 self.modules_to_save = set(peft_config.modules_to_save)
+             else:
+                 self.modules_to_save.update(peft_config.modules_to_save)
+             _set_trainable(self, adapter_name)
+
+     @classmethod
+     def _split_kwargs(cls, kwargs: dict[str, Any]):
+         _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",)
+         hf_hub_download_kwargs = {}
+         other_kwargs = {}
+
+         for key, value in kwargs.items():
+             if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature:
+                 hf_hub_download_kwargs[key] = value
+             else:
+                 other_kwargs[key] = value
+
+         return hf_hub_download_kwargs, other_kwargs
+
+     def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any):
+         """
+         Load a trained adapter into the model.
+
+         The name for the new adapter should be unique.
+
+         The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
+         adapter.
+
+         Args:
+             model_id (`str` or `os.PathLike`):
+                 The name of the PEFT configuration to use. Can be either a model id of a PEFT configuration hosted on
+                 the Hugging Face Hub or a path to a directory containing a saved PEFT configuration.
+             adapter_name (`str`):
+                 The name of the adapter to be added.
+             is_trainable (`bool`, *optional*, defaults to `False`):
+                 Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
+                 used for inference.
+             kwargs: (`optional`):
+                 Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub.
+         """
+         from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
+
+         hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs)
+         torch_device = infer_device()
+
+         if adapter_name not in self.peft_config:
+             # load the config
+             peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[
+                 PeftConfig._get_peft_type(
+                     model_id,
+                     **hf_hub_download_kwargs,
+                 )
+             ].from_pretrained(
+                 model_id,
+                 **hf_hub_download_kwargs,
+             )
+             if peft_config.is_prompt_learning and is_trainable:
+                 raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
+             else:
+                 peft_config.inference_mode = not is_trainable
+             self.add_adapter(adapter_name, peft_config)
+
+         adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs)
+
+         # load the weights into the model
+         load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name)
+         if (
+             (getattr(self, "hf_device_map", None) is not None)
+             and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
+             and len(self.peft_config) == 1
+         ):
+             device_map = kwargs.get("device_map", "auto")
+             max_memory = kwargs.get("max_memory", None)
+             offload_dir = kwargs.get("offload_folder", None)
+             offload_index = kwargs.get("offload_index", None)
+
+             dispatch_model_kwargs = {}
+             # Safety checker for previous `accelerate` versions
+             # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
+             if "offload_index" in inspect.signature(dispatch_model).parameters:
+                 dispatch_model_kwargs["offload_index"] = offload_index
+
+             no_split_module_classes = self._no_split_modules
+
+             if device_map != "sequential":
+                 max_memory = get_balanced_memory(
+                     self,
+                     max_memory=max_memory,
+                     no_split_module_classes=no_split_module_classes,
+                     low_zero=(device_map == "balanced_low_0"),
+                 )
+             if isinstance(device_map, str):
+                 device_map = infer_auto_device_map(
+                     self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
+                 )
+             dispatch_model(
+                 self,
+                 device_map=device_map,
+                 offload_dir=offload_dir,
+                 **dispatch_model_kwargs,
+             )
+             hook = AlignDevicesHook(io_same_device=True)
+             if self.peft_config[adapter_name].is_prompt_learning:
+                 remove_hook_from_submodules(self.prompt_encoder)
+             add_hook_to_module(self.get_base_model(), hook)
+
+         # Set model in evaluation mode to deactivate Dropout modules by default
+         if not is_trainable:
+             self.eval()
+         return load_result
+
+ def set_adapter(self, adapter_name: str) -> None:
777
+ """
778
+ Sets the active adapter.
779
+
780
+ Only one adapter can be active at a time.
781
+
782
+ Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is
783
+ not desired, use the following code.
784
+
785
+ ```py
786
+ >>> for name, param in model_peft.named_parameters():
787
+ ... if ...: # some check on name (ex. if 'lora' in name)
788
+ ... param.requires_grad = False
789
+ ```
790
+
791
+ Args:
792
+ adapter_name (`str`):
793
+ The name of the adapter to be set as active. The adapter must be loaded first.
794
+ """
795
+ if adapter_name not in self.peft_config:
796
+ raise ValueError(f"Adapter {adapter_name} not found.")
797
+ self.active_adapter = adapter_name
798
+ if not self.peft_config[adapter_name].is_prompt_learning:
799
+ self.base_model.set_adapter(adapter_name)
800
+ _set_adapter(self, adapter_name)
801
+
802
+ @property
803
+ def base_model_torch_dtype(self):
804
+ return getattr(self.base_model, "dtype", None)
805
+
806
+ @property
807
+ def active_peft_config(self):
808
+ return self.peft_config[self.active_adapter]
809
+
810
    def create_or_update_model_card(self, output_dir: str):
        """
        Updates or creates the model card to include information about peft:
        1. Adds `peft` library tag
        2. Adds peft version
        3. Adds base model info
        4. Adds quantization information if it was used
        """
        filename = os.path.join(output_dir, "README.md")

        card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData())

        card.data["library_name"] = "peft"

        model_config = getattr(self, "config", None)
        if hasattr(model_config, "to_dict"):
            model_config = model_config.to_dict()
        if model_config is not None and "_name_or_path" in model_config:
            card.data["base_model"] = model_config["_name_or_path"]

        lines = card.text.splitlines()

        quantization_config = None
        if hasattr(model_config, "quantization_config"):
            quantization_config = self.config.quantization_config.to_dict()
        training_config_text = ""
        quantization_prefix = "The following `bitsandbytes` quantization config was used during training:"
        # Adds quantization information if it was used
        if quantization_config is not None:
            training_config_text += f"\n{quantization_prefix}\n"
            training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()])
            training_config_text += "\n"

        training_procedure_heading = "## Training procedure"
        if quantization_prefix not in lines and bool(training_config_text):
            if training_procedure_heading in lines:
                lines.insert(lines.index(training_procedure_heading) + 2, training_config_text)
            else:
                lines.append(f"{training_procedure_heading}\n{training_config_text}")

        # Adds peft version
        framework_block_heading = "### Framework versions"
        if f"- PEFT {__version__}" not in lines:
            if framework_block_heading in lines:
                lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}")
            else:
                lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}")

        card.text = "\n".join(lines)
        card.save(filename)
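    # Usage sketch: `save_pretrained` calls this helper internally, so
    #
    # >>> model.save_pretrained("outputs/my-adapter")  # illustrative directory
    #
    # leaves an "outputs/my-adapter/README.md" tagged with `library_name: peft`,
    # the PEFT version, the base model name, and any quantization config.

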
class PeftModelForSequenceClassification(PeftModel):
    """
    Peft model for sequence classification tasks.

    Args:
        model ([`~transformers.PreTrainedModel`]): Base transformer model.
        peft_config ([`PeftConfig`]): Peft config.

    **Attributes**:
        - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
        - **cls_layer_name** (`str`) -- The name of the classification layer.

    Example:

    ```py
    >>> from transformers import AutoModelForSequenceClassification
    >>> from peft import PeftModelForSequenceClassification, get_peft_config

    >>> config = {
    ...     "peft_type": "PREFIX_TUNING",
    ...     "task_type": "SEQ_CLS",
    ...     "inference_mode": False,
    ...     "num_virtual_tokens": 20,
    ...     "token_dim": 768,
    ...     "num_transformer_submodules": 1,
    ...     "num_attention_heads": 12,
    ...     "num_layers": 12,
    ...     "encoder_hidden_size": 768,
    ...     "prefix_projection": False,
    ...     "postprocess_past_key_value_function": None,
    ... }

    >>> peft_config = get_peft_config(config)
    >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")
    >>> peft_model = PeftModelForSequenceClassification(model, peft_config)
    >>> peft_model.print_trainable_parameters()
    trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
    ```
    """

    def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
        super().__init__(model, peft_config, adapter_name)
        if self.modules_to_save is None:
            self.modules_to_save = {"classifier", "score"}
        else:
            self.modules_to_save.update({"classifier", "score"})

        for name, _ in self.base_model.named_children():
            if any(module_name in name for module_name in self.modules_to_save):
                self.cls_layer_name = name
                break

        # to make sure classifier layer is trainable
        _set_trainable(self, adapter_name)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        task_ids=None,
        **kwargs,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        peft_config = self.active_peft_config
        if not peft_config.is_prompt_learning:
            with self._enable_peft_forward_hooks(**kwargs):
                kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                if peft_config.peft_type == PeftType.POLY:
                    kwargs["task_ids"] = task_ids
                return self.base_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    inputs_embeds=inputs_embeds,
                    labels=labels,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs,
                )

        batch_size = _get_batch_size(input_ids, inputs_embeds)
        if attention_mask is not None:
            # concat prompt attention mask
            prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
        if kwargs.get("position_ids", None) is not None:
            warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
            kwargs["position_ids"] = None
        kwargs.update(
            {
                "attention_mask": attention_mask,
                "labels": labels,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
            }
        )

        if peft_config.peft_type == PeftType.PREFIX_TUNING:
            return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
        else:
            if kwargs.get("token_type_ids", None) is not None:
                kwargs["token_type_ids"] = torch.cat(
                    (
                        torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
                        kwargs["token_type_ids"],
                    ),
                    dim=1,
                ).long()
            if inputs_embeds is None:
                inputs_embeds = self.word_embeddings(input_ids)
            prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
            prompts = prompts.to(inputs_embeds.dtype)
            inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
            return self.base_model(inputs_embeds=inputs_embeds, **kwargs)

    def _prefix_tuning_forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        batch_size = _get_batch_size(input_ids, inputs_embeds)
        past_key_values = self.get_prompt(batch_size)
        fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
        kwargs.update(
            {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "inputs_embeds": inputs_embeds,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
                "past_key_values": past_key_values,
            }
        )
        if "past_key_values" in fwd_params:
            return self.base_model(labels=labels, **kwargs)
        else:
            transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
            fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
            if "past_key_values" not in fwd_params:
                raise ValueError("Model does not support past key values which are required for prefix tuning.")
            outputs = transformer_backbone_name(**kwargs)
            pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]
            if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
                pooled_output = self.base_model.dropout(pooled_output)
            logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)

            loss = None
            if labels is not None:
                if self.config.problem_type is None:
                    if self.base_model.num_labels == 1:
                        self.config.problem_type = "regression"
                    elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                        self.config.problem_type = "single_label_classification"
                    else:
                        self.config.problem_type = "multi_label_classification"

                if self.config.problem_type == "regression":
                    loss_fct = MSELoss()
                    if self.base_model.num_labels == 1:
                        loss = loss_fct(logits.squeeze(), labels.squeeze())
                    else:
                        loss = loss_fct(logits, labels)
                elif self.config.problem_type == "single_label_classification":
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))
                elif self.config.problem_type == "multi_label_classification":
                    loss_fct = BCEWithLogitsLoss()
                    loss = loss_fct(logits, labels)
            if not return_dict:
                output = (logits,) + outputs[2:]
                return ((loss,) + output) if loss is not None else output

            return SequenceClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )

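# Note (sketch of the logic above): the prefix-tuning branch mirrors the
# transformers loss convention -- `num_labels == 1` selects MSE regression,
# integer labels with several classes select cross-entropy, and float
# multi-hot labels select BCE-with-logits.

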
class PeftModelForCausalLM(PeftModel):
    """
    Peft model for causal language modeling.

    Args:
        model ([`~transformers.PreTrainedModel`]): Base transformer model.
        peft_config ([`PeftConfig`]): Peft config.

    Example:

    ```py
    >>> from transformers import AutoModelForCausalLM
    >>> from peft import PeftModelForCausalLM, get_peft_config

    >>> config = {
    ...     "peft_type": "PREFIX_TUNING",
    ...     "task_type": "CAUSAL_LM",
    ...     "inference_mode": False,
    ...     "num_virtual_tokens": 20,
    ...     "token_dim": 1280,
    ...     "num_transformer_submodules": 1,
    ...     "num_attention_heads": 20,
    ...     "num_layers": 36,
    ...     "encoder_hidden_size": 1280,
    ...     "prefix_projection": False,
    ...     "postprocess_past_key_value_function": None,
    ... }

    >>> peft_config = get_peft_config(config)
    >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large")
    >>> peft_model = PeftModelForCausalLM(model, peft_config)
    >>> peft_model.print_trainable_parameters()
    trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544
    ```
    """

    def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
        super().__init__(model, peft_config, adapter_name)
        self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        task_ids=None,
        **kwargs,
    ):
        peft_config = self.active_peft_config
        if not peft_config.is_prompt_learning:
            if self.base_model.config.model_type == "mpt":
                if inputs_embeds is not None:
                    raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds")
                return self.base_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    labels=labels,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs,
                )

            if peft_config.peft_type == PeftType.POLY:
                kwargs["task_ids"] = task_ids

            with self._enable_peft_forward_hooks(**kwargs):
                kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                return self.base_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    inputs_embeds=inputs_embeds,
                    labels=labels,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs,
                )

        batch_size = _get_batch_size(input_ids, inputs_embeds)
        if attention_mask is not None:
            # concat prompt attention mask
            prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)

        if kwargs.get("position_ids", None) is not None:
            warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
            kwargs["position_ids"] = None
        if kwargs.get("token_type_ids", None) is not None:
            warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
            kwargs["token_type_ids"] = None
        kwargs.update(
            {
                "attention_mask": attention_mask,
                "labels": labels,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
            }
        )

        if peft_config.peft_type == PeftType.PREFIX_TUNING:
            past_key_values = self.get_prompt(batch_size)
            return self.base_model(
                input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs
            )
        else:
            if inputs_embeds is None:
                inputs_embeds = self.word_embeddings(input_ids)
            # concat prompt labels
            if labels is not None:
                prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
                kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
            prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
            prompts = prompts.to(inputs_embeds.dtype)
            inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
            return self.base_model(inputs_embeds=inputs_embeds, **kwargs)

    def generate(self, *args, **kwargs):
        peft_config = self.active_peft_config
        self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
        if hasattr(self.base_model, "model"):
            self.base_model.model.generation_config = self.generation_config
        else:
            self.base_model.generation_config = self.generation_config
        try:
            if not peft_config.is_prompt_learning:
                with self._enable_peft_forward_hooks(*args, **kwargs):
                    kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                    outputs = self.base_model.generate(*args, **kwargs)
            else:
                outputs = self.base_model.generate(**kwargs)
        except:
            self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
            raise
        else:
            self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
            return outputs

    def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor] = None, **kwargs):
        peft_config = self.active_peft_config
        model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)

        # https://github.com/huggingface/transformers/pull/26681/ introduced new cache format
        # for some architectures which requires a special fix for prompt tuning etc.
        # TODO: starting with transformers 4.38, all architectures should support caching.
        uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0")
        uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0")
        transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"]
        uses_cache = uses_transformers_4_38 or (
            uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs
        )

        if peft_config.peft_type == PeftType.POLY:
            model_kwargs["task_ids"] = task_ids
        if peft_config.is_prompt_learning:
            if uses_cache and (model_kwargs["past_key_values"] is not None):
                # change in the logic of `prepare_inputs_for_generation` makes the below code necessary
                # In prompt learning methods, past key values are longer when compared to the `input_ids`.
                # As such only consider the last input ids in the autoregressive generation phase.
                if model_kwargs["past_key_values"][0][0].shape[-2] >= model_kwargs["input_ids"].shape[1]:
                    model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:]

            if model_kwargs.get("attention_mask", None) is not None:
                size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens
                prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device)
                model_kwargs["attention_mask"] = torch.cat(
                    (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1
                )

            if model_kwargs.get("position_ids", None) is not None:
                warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
                model_kwargs["position_ids"] = None

            if kwargs.get("token_type_ids", None) is not None:
                warnings.warn(
                    "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
                )
                kwargs["token_type_ids"] = None

            if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
                past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0])
                model_kwargs["past_key_values"] = past_key_values
            else:
                if model_kwargs["past_key_values"] is None:
                    inputs_embeds = self.word_embeddings(model_kwargs["input_ids"])
                    prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids)
                    prompts = prompts.to(inputs_embeds.dtype)
                    model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1)
                    model_kwargs["input_ids"] = None

        # For transformers>=4.38.0 - for some architectures such as Llama, `cache_position` is
        # passed in the forward pass to keep track of the position ids of the cache. We have to
        # pop that from `model_kwargs` as `cache_position` is properly created by the model, using the passed
        # `inputs_embeds`: https://github.com/huggingface/transformers/blob/593230f0a1150ea9c0477b9d859f25daf73c8c33/src/transformers/models/llama/modeling_llama.py#L956
        _ = model_kwargs.pop("cache_position", None)

        return model_kwargs

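# Usage sketch (the model id is illustrative): prompt-learning adapters extend the
# attention mask and past key values transparently during generation, e.g.
#
# >>> peft_model = PeftModelForCausalLM(AutoModelForCausalLM.from_pretrained("gpt2"), peft_config)
# >>> out = peft_model.generate(input_ids=input_ids, max_new_tokens=20)

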
class PeftModelForSeq2SeqLM(PeftModel):
    """
    Peft model for sequence-to-sequence language modeling.

    Args:
        model ([`~transformers.PreTrainedModel`]): Base transformer model.
        peft_config ([`PeftConfig`]): Peft config.

    Example:

    ```py
    >>> from transformers import AutoModelForSeq2SeqLM
    >>> from peft import PeftModelForSeq2SeqLM, get_peft_config

    >>> config = {
    ...     "peft_type": "LORA",
    ...     "task_type": "SEQ_2_SEQ_LM",
    ...     "inference_mode": False,
    ...     "r": 8,
    ...     "target_modules": ["q", "v"],
    ...     "lora_alpha": 32,
    ...     "lora_dropout": 0.1,
    ...     "fan_in_fan_out": False,
    ...     "enable_lora": None,
    ...     "bias": "none",
    ... }

    >>> peft_config = get_peft_config(config)
    >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
    >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)
    >>> peft_model.print_trainable_parameters()
    trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566
    ```
    """

    def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
        super().__init__(model, peft_config, adapter_name)
        self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
        self.base_model_prepare_encoder_decoder_kwargs_for_generation = (
            self.base_model._prepare_encoder_decoder_kwargs_for_generation
        )

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        decoder_inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        task_ids=None,
        **kwargs,
    ):
        peft_config = self.active_peft_config
        if not peft_config.is_prompt_learning:
            if peft_config.peft_type == PeftType.POLY:
                kwargs["task_ids"] = task_ids

            with self._enable_peft_forward_hooks(**kwargs):
                kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                return self.base_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    inputs_embeds=inputs_embeds,
                    decoder_input_ids=decoder_input_ids,
                    decoder_attention_mask=decoder_attention_mask,
                    decoder_inputs_embeds=decoder_inputs_embeds,
                    labels=labels,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs,
                )

        batch_size = _get_batch_size(input_ids, inputs_embeds)
        if decoder_attention_mask is not None:
            # concat prompt attention mask
            prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
                decoder_attention_mask.device
            )
            if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
                decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)

        if kwargs.get("position_ids", None) is not None:
            warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
            kwargs["position_ids"] = None
        if kwargs.get("token_type_ids", None) is not None:
            warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
            kwargs["token_type_ids"] = None
        kwargs.update(
            {
                "attention_mask": attention_mask,
                "decoder_attention_mask": decoder_attention_mask,
                "labels": labels,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
            }
        )

        if peft_config.peft_type == PeftType.PREFIX_TUNING:
            past_key_values = self.get_prompt(batch_size)
            return self.base_model(
                input_ids=input_ids,
                decoder_input_ids=decoder_input_ids,
                decoder_inputs_embeds=decoder_inputs_embeds,
                past_key_values=past_key_values,
                **kwargs,
            )
        elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
            if inputs_embeds is None:
                inputs_embeds = self.word_embeddings(input_ids)

            if attention_mask is not None:
                # concat prompt attention mask
                prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
                    attention_mask.device
                )
                kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)

            prompts = self.get_prompt(batch_size=batch_size)
            prompts = prompts.to(inputs_embeds.dtype)
            inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)

            return self.base_model(
                inputs_embeds=inputs_embeds,
                decoder_input_ids=decoder_input_ids,
                decoder_inputs_embeds=decoder_inputs_embeds,
                **kwargs,
            )
        else:
            if inputs_embeds is None:
                inputs_embeds = self.word_embeddings(input_ids)
            if decoder_inputs_embeds is None and decoder_input_ids is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )
                decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)

            if attention_mask is not None:
                # concat prompt attention mask
                prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
                    attention_mask.device
                )
                kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
            # concat prompt labels
            if labels is not None:
                if peft_config.num_transformer_submodules == 1:
                    kwargs["labels"] = labels
                elif peft_config.num_transformer_submodules == 2:
                    prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
                    kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
            prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
            prompts = prompts.to(inputs_embeds.dtype)
            inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
            if peft_config.num_transformer_submodules == 1:
                return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
            elif peft_config.num_transformer_submodules == 2:
                decoder_inputs_embeds = torch.cat(
                    (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1
                )
                return self.base_model(
                    inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs
                )

    def generate(self, **kwargs):
        peft_config = self.active_peft_config
        self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
        self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
            self._prepare_encoder_decoder_kwargs_for_generation
        )
        try:
            if not peft_config.is_prompt_learning:
                with self._enable_peft_forward_hooks(**kwargs):
                    kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                    outputs = self.base_model.generate(**kwargs)
            else:
                if "input_ids" not in kwargs:
                    raise ValueError("input_ids must be provided for Peft model generation")
                if kwargs.get("position_ids", None) is not None:
                    warnings.warn(
                        "Position ids are not supported for parameter efficient tuning. Ignoring position ids."
                    )
                    kwargs["position_ids"] = None
                if kwargs.get("token_type_ids", None) is not None:
                    warnings.warn(
                        "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
                    )
                    kwargs["token_type_ids"] = None

                if peft_config.peft_type == PeftType.PREFIX_TUNING:
                    outputs = self.base_model.generate(**kwargs)
                elif peft_config.peft_type in [
                    PeftType.PROMPT_TUNING,
                    PeftType.P_TUNING,
                    PeftType.MULTITASK_PROMPT_TUNING,
                ]:
                    kwargs = deepcopy(kwargs)

                    if "encoder_outputs" in kwargs:
                        del kwargs["encoder_outputs"]
                        warnings.warn(
                            "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it."
                        )

                    input_ids = kwargs.pop("input_ids")
                    inputs_embeds = self.word_embeddings(input_ids)
                    batch_size = inputs_embeds.shape[0]
                    prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None))
                    prompts = prompts.to(inputs_embeds.dtype)

                    inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
                    kwargs["inputs_embeds"] = inputs_embeds

                    if "attention_mask" in kwargs:
                        prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
                            kwargs["attention_mask"].device
                        )
                        kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1)

                    return self.base_model.generate(**kwargs)
                else:
                    raise NotImplementedError
        except:
            self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
            self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
                self.base_model_prepare_encoder_decoder_kwargs_for_generation
            )
            raise
        else:
            self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
            self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
                self.base_model_prepare_encoder_decoder_kwargs_for_generation
            )
            return outputs

    def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs):
        peft_config = self.active_peft_config
        model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
        if peft_config.peft_type == PeftType.POLY:
            model_kwargs["task_ids"] = task_ids
        if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
            batch_size = model_kwargs["decoder_input_ids"].shape[0]
            past_key_values = self.get_prompt(batch_size)
            model_kwargs["past_key_values"] = past_key_values

        return model_kwargs

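# Note (sketch of the logic above): for encoder-decoder prompt learning with
# `num_transformer_submodules == 2`, `get_prompt` returns the encoder and decoder
# prompts concatenated along the sequence dimension, which is why the code splits
# `prompts` at `num_virtual_tokens` before prepending to each stream.

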
class PeftModelForTokenClassification(PeftModel):
    """
    Peft model for token classification tasks.

    Args:
        model ([`~transformers.PreTrainedModel`]): Base transformer model.
        peft_config ([`PeftConfig`]): Peft config.

    **Attributes**:
        - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
        - **cls_layer_name** (`str`) -- The name of the classification layer.

    Example:

    ```py
    >>> from transformers import AutoModelForTokenClassification
    >>> from peft import PeftModelForTokenClassification, get_peft_config

    >>> config = {
    ...     "peft_type": "PREFIX_TUNING",
    ...     "task_type": "TOKEN_CLS",
    ...     "inference_mode": False,
    ...     "num_virtual_tokens": 20,
    ...     "token_dim": 768,
    ...     "num_transformer_submodules": 1,
    ...     "num_attention_heads": 12,
    ...     "num_layers": 12,
    ...     "encoder_hidden_size": 768,
    ...     "prefix_projection": False,
    ...     "postprocess_past_key_value_function": None,
    ... }

    >>> peft_config = get_peft_config(config)
    >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased")
    >>> peft_model = PeftModelForTokenClassification(model, peft_config)
    >>> peft_model.print_trainable_parameters()
    trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
    ```
    """

    def __init__(self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default") -> None:
        super().__init__(model, peft_config, adapter_name)
        if self.modules_to_save is None:
            self.modules_to_save = {"classifier", "score"}
        else:
            self.modules_to_save.update({"classifier", "score"})

        for name, _ in self.base_model.named_children():
            if any(module_name in name for module_name in self.modules_to_save):
                self.cls_layer_name = name
                break

        # to make sure classifier layer is trainable
        _set_trainable(self, adapter_name)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        task_ids=None,
        **kwargs,
    ):
        peft_config = self.active_peft_config
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if not peft_config.is_prompt_learning:
            with self._enable_peft_forward_hooks(**kwargs):
                kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                if peft_config.peft_type == PeftType.POLY:
                    kwargs["task_ids"] = task_ids
                return self.base_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    inputs_embeds=inputs_embeds,
                    labels=labels,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs,
                )

        batch_size = _get_batch_size(input_ids, inputs_embeds)
        if attention_mask is not None:
            # concat prompt attention mask
            prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
        if kwargs.get("position_ids", None) is not None:
            warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
            kwargs["position_ids"] = None
        kwargs.update(
            {
                "attention_mask": attention_mask,
                "labels": labels,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
            }
        )

        if peft_config.peft_type == PeftType.PREFIX_TUNING:
            return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
        else:
            if kwargs.get("token_type_ids", None) is not None:
                kwargs["token_type_ids"] = torch.cat(
                    (
                        torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
                        kwargs["token_type_ids"],
                    ),
                    dim=1,
                ).long()
            if inputs_embeds is None:
                inputs_embeds = self.word_embeddings(input_ids)
            prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
            prompts = prompts.to(inputs_embeds.dtype)
            inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
            return self.base_model(inputs_embeds=inputs_embeds, **kwargs)

    def _prefix_tuning_forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        batch_size = _get_batch_size(input_ids, inputs_embeds)
        past_key_values = self.get_prompt(batch_size)
        fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
        kwargs.update(
            {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "inputs_embeds": inputs_embeds,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
                "past_key_values": past_key_values,
            }
        )
        if "past_key_values" in fwd_params:
            return self.base_model(labels=labels, **kwargs)
        else:
            transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
            fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
            if "past_key_values" not in fwd_params:
                raise ValueError("Model does not support past key values which are required for prefix tuning.")
            outputs = transformer_backbone_name(**kwargs)
            sequence_output = outputs[0]
            if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
                sequence_output = self.base_model.dropout(sequence_output)
            logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)

            loss = None
            if labels is not None:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            if not return_dict:
                output = (logits,) + outputs[2:]
                return ((loss,) + output) if loss is not None else output

            return TokenClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )

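# Note (sketch of the logic above): token classification reuses the
# sequence-classification setup, but feeds the per-token `sequence_output`
# (outputs[0]) through the head instead of the pooled output.

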
class PeftModelForQuestionAnswering(PeftModel):
    """
    Peft model for extractive question answering.

    Args:
        model ([`~transformers.PreTrainedModel`]): Base transformer model.
        peft_config ([`PeftConfig`]): Peft config.

    **Attributes**:
        - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
        - **cls_layer_name** (`str`) -- The name of the classification layer.

    Example:

    ```py
    >>> from transformers import AutoModelForQuestionAnswering
    >>> from peft import PeftModelForQuestionAnswering, get_peft_config

    >>> config = {
    ...     "peft_type": "LORA",
    ...     "task_type": "QUESTION_ANS",
    ...     "inference_mode": False,
    ...     "r": 16,
    ...     "target_modules": ["query", "value"],
    ...     "lora_alpha": 32,
    ...     "lora_dropout": 0.05,
    ...     "fan_in_fan_out": False,
    ...     "bias": "none",
    ... }

    >>> peft_config = get_peft_config(config)
    >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased")
    >>> peft_model = PeftModelForQuestionAnswering(model, peft_config)
    >>> peft_model.print_trainable_parameters()
    trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013
    ```
    """

    def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
        super().__init__(model, peft_config, adapter_name)
        if self.modules_to_save is None:
            self.modules_to_save = {"qa_outputs"}
        else:
            self.modules_to_save.update({"qa_outputs"})

        for name, _ in self.base_model.named_children():
            if any(module_name in name for module_name in self.modules_to_save):
                self.cls_layer_name = name
                break

        # to make sure classifier layer is trainable
        _set_trainable(self, adapter_name)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        task_ids=None,
        **kwargs,
    ):
        peft_config = self.active_peft_config
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if not peft_config.is_prompt_learning:
            if peft_config.peft_type == PeftType.POLY:
                kwargs["task_ids"] = task_ids

            with self._enable_peft_forward_hooks(**kwargs):
                kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                return self.base_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    inputs_embeds=inputs_embeds,
                    start_positions=start_positions,
                    end_positions=end_positions,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs,
                )

        batch_size = _get_batch_size(input_ids, inputs_embeds)
        if attention_mask is not None:
            # concat prompt attention mask
            prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
        if kwargs.get("position_ids", None) is not None:
            warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
            kwargs["position_ids"] = None
        kwargs.update(
            {
                "attention_mask": attention_mask,
                "start_positions": start_positions,
                "end_positions": end_positions,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
            }
        )

        if peft_config.peft_type == PeftType.PREFIX_TUNING:
            return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
        else:
            if kwargs.get("token_type_ids", None) is not None:
                kwargs["token_type_ids"] = torch.cat(
                    (
                        torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
                        kwargs["token_type_ids"],
                    ),
                    dim=1,
                ).long()
            if inputs_embeds is None:
                inputs_embeds = self.word_embeddings(input_ids)
            prompts = self.get_prompt(batch_size=batch_size)
            prompts = prompts.to(inputs_embeds.dtype)
            inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
            return self.base_model(inputs_embeds=inputs_embeds, **kwargs)

    def _prefix_tuning_forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        batch_size = _get_batch_size(input_ids, inputs_embeds)
        past_key_values = self.get_prompt(batch_size)
        fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
        kwargs.update(
            {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "inputs_embeds": inputs_embeds,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
                "past_key_values": past_key_values,
            }
        )
        if "past_key_values" in fwd_params:
            return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs)
        else:
            transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
            fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
            if "past_key_values" not in fwd_params:
                raise ValueError("Model does not support past key values which are required for prefix tuning.")
            outputs = transformer_backbone_name(**kwargs)
            sequence_output = outputs[0]
            if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
                sequence_output = self.base_model.dropout(sequence_output)
            logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
            start_logits, end_logits = logits.split(1, dim=-1)
            start_logits = start_logits.squeeze(-1).contiguous()
            end_logits = end_logits.squeeze(-1).contiguous()

            total_loss = None
            if start_positions is not None and end_positions is not None:
                # If we are on multi-GPU, squeeze the extra dimension
                if len(start_positions.size()) > 1:
                    start_positions = start_positions.squeeze(-1)
                if len(end_positions.size()) > 1:
                    end_positions = end_positions.squeeze(-1)
                # sometimes the start/end positions are outside our model inputs, we ignore these terms
                ignored_index = start_logits.size(1)
                start_positions = start_positions.clamp(0, ignored_index)
                end_positions = end_positions.clamp(0, ignored_index)

                loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
                start_loss = loss_fct(start_logits, start_positions)
                end_loss = loss_fct(end_logits, end_positions)
                total_loss = (start_loss + end_loss) / 2

            if not return_dict:
                output = (start_logits, end_logits) + outputs[2:]
                return ((total_loss,) + output) if total_loss is not None else output

            return QuestionAnsweringModelOutput(
                loss=total_loss,
                start_logits=start_logits,
                end_logits=end_logits,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )

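# Note (sketch of the logic above): extractive QA emits one start and one end
# logit per token; the head splits a 2-channel projection, clamps out-of-span
# positions to an ignored index, and averages the two cross-entropy losses.

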
class PeftModelForFeatureExtraction(PeftModel):
    """
    Peft model for extracting features/embeddings from transformer models.

    Args:
        model ([`~transformers.PreTrainedModel`]): Base transformer model.
        peft_config ([`PeftConfig`]): Peft config.

    **Attributes**:
        - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.

    Example:

    ```py
    >>> from transformers import AutoModel
    >>> from peft import PeftModelForFeatureExtraction, get_peft_config

    >>> config = {
    ...     "peft_type": "LORA",
    ...     "task_type": "FEATURE_EXTRACTION",
    ...     "inference_mode": False,
    ...     "r": 16,
    ...     "target_modules": ["query", "value"],
    ...     "lora_alpha": 32,
    ...     "lora_dropout": 0.05,
    ...     "fan_in_fan_out": False,
    ...     "bias": "none",
    ... }
    >>> peft_config = get_peft_config(config)
    >>> model = AutoModel.from_pretrained("bert-base-cased")
    >>> peft_model = PeftModelForFeatureExtraction(model, peft_config)
    >>> peft_model.print_trainable_parameters()
    ```
    """

    def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default"):
        super().__init__(model, peft_config, adapter_name)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        task_ids=None,
        **kwargs,
    ):
        peft_config = self.active_peft_config
        if not peft_config.is_prompt_learning:
            if peft_config.peft_type == PeftType.POLY:
                kwargs["task_ids"] = task_ids

            with self._enable_peft_forward_hooks(**kwargs):
                kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
                return self.base_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    inputs_embeds=inputs_embeds,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs,
                )

        batch_size = _get_batch_size(input_ids, inputs_embeds)
        if attention_mask is not None:
            # concat prompt attention mask
            prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)

        if kwargs.get("position_ids", None) is not None:
            warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
            kwargs["position_ids"] = None
        if kwargs.get("token_type_ids", None) is not None:
            warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
            kwargs["token_type_ids"] = None
        kwargs.update(
            {
                "attention_mask": attention_mask,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
            }
        )

        if peft_config.peft_type == PeftType.PREFIX_TUNING:
            past_key_values = self.get_prompt(batch_size)
            return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs)
        else:
            if inputs_embeds is None:
                inputs_embeds = self.word_embeddings(input_ids)
            prompts = self.get_prompt(batch_size=batch_size)
            prompts = prompts.to(inputs_embeds.dtype)
            inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
            return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
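
# Usage sketch (the model id is illustrative): with non-prefix prompt learning,
# the sequence dimension of the returned features includes the
# `num_virtual_tokens` prompt positions in addition to the real tokens:
#
# >>> peft_model = PeftModelForFeatureExtraction(AutoModel.from_pretrained("bert-base-cased"), peft_config)
# >>> features = peft_model(input_ids=input_ids, attention_mask=attention_mask)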
venv/lib/python3.10/site-packages/peft/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/peft/tuners/__init__.py ADDED
@@ -0,0 +1,32 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all

# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel
from .lora import LoraConfig, LoraModel, LoftQConfig
from .loha import LoHaConfig, LoHaModel
from .lokr import LoKrConfig, LoKrModel
from .ia3 import IA3Config, IA3Model
from .adalora import AdaLoraConfig, AdaLoraModel
from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType
from .prefix_tuning import PrefixEncoder, PrefixTuningConfig
from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit
from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit
from .oft import OFTConfig, OFTModel
from .mixed import MixedModel
from .poly import PolyConfig, PolyModel
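
# Note (sketch): these re-exports are what let tuner configs be imported from
# the package root, e.g. `from peft import LoraConfig, IA3Config`.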
venv/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py ADDED
@@ -0,0 +1,36 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from peft.import_utils import is_bnb_4bit_available, is_bnb_available

from .config import IA3Config
from .layer import Conv2d, IA3Layer, Linear
from .model import IA3Model


__all__ = ["Conv2d", "IA3Config", "IA3Layer", "IA3Model", "Linear"]


def __getattr__(name):
    if (name == "Linear8bitLt") and is_bnb_available():
        from .bnb import Linear8bitLt

        return Linear8bitLt

    if (name == "Linear4bit") and is_bnb_4bit_available():
        from .bnb import Linear4bit

        return Linear4bit

    raise AttributeError(f"module {__name__} has no attribute {name}")
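
# Note (sketch): the module-level `__getattr__` (PEP 562) defers importing the
# bitsandbytes-backed layers, so `from peft.tuners.ia3 import Linear8bitLt`
# only touches bitsandbytes when that library is actually installed.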
venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (803 Bytes).
venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc ADDED
Binary file (2.57 kB).
venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.24 kB).
venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc ADDED
Binary file (7.63 kB).
venv/lib/python3.10/site-packages/peft/tuners/ia3/__pycache__/model.cpython-310.pyc ADDED
Binary file (13.3 kB).
venv/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py ADDED
@@ -0,0 +1,129 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any

import torch

from peft.import_utils import is_bnb_4bit_available, is_bnb_available

from .layer import IA3Layer


if is_bnb_available():

    class Linear8bitLt(torch.nn.Module, IA3Layer):
        # (IA)^3 implemented in a dense layer
        def __init__(
            self,
            base_layer: torch.nn.Module,
            adapter_name: str,
            is_feedforward: bool,
            init_ia3_weights: bool = True,
            **kwargs,
        ) -> None:
            super().__init__()
            IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)

            # Freezing the pre-trained weight matrix
            self.get_base_layer().weight.requires_grad = False
            self._active_adapter = adapter_name
            self.update_layer(adapter_name, init_ia3_weights)

        def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
            # note: no check for self.merged because merging is not supported (yet)
            if self.disable_adapters:
                return self.base_layer(x)

            ia3_scaling = 1
            for active_adapter in self.active_adapters:
                if active_adapter not in self.ia3_l.keys():
                    continue
                ia3_scaling *= self.ia3_l[active_adapter].flatten()

            requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
            if requires_conversion:
                x = x.float()
            if self.is_feedforward:
                result = self.base_layer(x * ia3_scaling)
                expected_dtype = result.dtype
            else:
                result = self.base_layer(x)
                expected_dtype = result.dtype
                result = result * ia3_scaling

            if requires_conversion:
                result = result.to(expected_dtype)

            return result

        def __repr__(self) -> str:
            rep = super().__repr__()
            return "ia3." + rep


if is_bnb_4bit_available():

    class Linear4bit(torch.nn.Module, IA3Layer):
        # IA3 implemented in a dense layer
        def __init__(
            self,
            base_layer: torch.nn.Module,
            adapter_name: str,
            is_feedforward: bool,
            init_ia3_weights: bool = True,
            **kwargs,
        ) -> None:
            super().__init__()
            IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)

            # Freezing the pre-trained weight matrix
            self.get_base_layer().weight.requires_grad = False
            self._active_adapter = adapter_name
            self.update_layer(adapter_name, init_ia3_weights)

        def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
            # note: no check for self.merged because merging is not supported (yet)
            if self.disable_adapters:
                return self.base_layer(x)

            ia3_scaling = 1
            for active_adapter in self.active_adapters:
                if active_adapter not in self.ia3_l.keys():
                    continue
                ia3_scaling *= self.ia3_l[active_adapter].flatten()

            requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
            if requires_conversion:
                x = x.float()
            if self.is_feedforward:
                result = self.base_layer(x * ia3_scaling)
                expected_dtype = result.dtype
            else:
                result = self.base_layer(x)
                expected_dtype = result.dtype
                result = result * ia3_scaling

            result = result.clone()
            # adalora.py and lora.py both suggest that this is necessary for 4-bit training on older
            # versions of Pytorch. This has been duplicated here.

            if requires_conversion:
                result = result.to(expected_dtype)

            return result

        def __repr__(self) -> str:
            rep = super().__repr__()
            return "ia3." + rep
venv/lib/python3.10/site-packages/peft/tuners/ia3/config.py ADDED
@@ -0,0 +1,98 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import List, Optional, Union

from peft.config import PeftConfig
from peft.utils import PeftType


@dataclass
class IA3Config(PeftConfig):
    """
    This is the configuration class to store the configuration of a [`IA3Model`].

    Args:
        target_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to apply the adapter to. If this is specified, only the modules with the
            specified names will be replaced. When passing a string, a regex match will be performed. When passing a
            list of strings, either an exact match will be performed or it is checked if the name of the module ends
            with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are
            chosen, excluding the output layer. If this is not specified, modules will be chosen according to the
            model architecture. If the architecture is not known, an error will be raised -- in this case, you
            should specify the target modules manually.
        feedforward_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to be treated as feedforward modules, as in the original paper. These modules
            will have (IA)³ vectors multiplied to the input, instead of the output. `feedforward_modules` must be a
            name or a subset of names present in `target_modules`.
        fan_in_fan_out (`bool`):
            Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
            `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
        modules_to_save (`Optional[List[str]]`):
            List of modules apart from (IA)³ layers to be set as trainable and saved in the final checkpoint.
        init_ia3_weights (`bool`):
            Whether to initialize the vectors in the (IA)³ layers, defaults to `True`. Setting this to `False` is
            discouraged.
    """

    target_modules: Optional[Union[List[str], str]] = field(
        default=None,
        metadata={
            "help": (
                "List of module names or regex expression of the module names to replace with (IA)³. "
                "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
                "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer. "
                "If not specified, modules will be chosen according to the model architecture. If the architecture is "
                "not known, an error will be raised -- in this case, you should specify the target modules manually."
            ),
        },
    )
    feedforward_modules: Optional[Union[List[str], str]] = field(
        default=None,
        metadata={
            "help": "List of module names or a regex expression of module names which are feedforward. "
            "For example, ['output.dense']"
        },
    )
    fan_in_fan_out: bool = field(
        default=False,
        metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
    )
    modules_to_save: Optional[List[str]] = field(
        default=None,
        metadata={
            "help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. "
            "For example, in Sequence Classification or Token Classification tasks, "
            "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
        },
    )
    init_ia3_weights: bool = field(
        default=True,
        metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."},
84
+ )
85
+
86
+ def __post_init__(self):
87
+ self.peft_type = PeftType.IA3
88
+ self.target_modules = (
89
+ set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
90
+ )
91
+ self.feedforward_modules = (
92
+ set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules
93
+ )
94
+
95
+ # check if feedforward_modules is a subset of target_modules. run the check only if both are sets
96
+ if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set):
97
+ if not self.feedforward_modules.issubset(self.target_modules):
98
+ raise ValueError("`feedforward_modules` should be a subset of `target_modules`")
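A usage sketch of this config and its `__post_init__` validation; the module names mirror the T5-style names used elsewhere in this package and are otherwise illustrative:

```py
from peft import IA3Config

config = IA3Config(
    task_type="SEQ_2_SEQ_LM",
    target_modules=["k", "v", "wo"],
    feedforward_modules=["wo"],  # must be a subset of target_modules
)
assert config.feedforward_modules == {"wo"}  # lists are normalized to sets

# feedforward_modules outside target_modules fails the subset check:
# IA3Config(target_modules=["k", "v"], feedforward_modules=["wo"])  # -> ValueError
```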
venv/lib/python3.10/site-packages/peft/tuners/ia3/layer.py ADDED
@@ -0,0 +1,307 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import warnings
+ from typing import Any, List, Optional
+
+ import torch
+ import torch.nn as nn
+ from transformers.pytorch_utils import Conv1D
+
+ from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
+ from peft.utils import transpose
+
+
+ class IA3Layer(BaseTunerLayer):
+     # All names of layers that may contain adapter weights
+     adapter_layer_names = ("ia3_l",)
+
+     def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None:
+         self.base_layer = base_layer
+         self.ia3_l = nn.ParameterDict({})
+         # Mark the weight as unmerged
+         self._disable_adapters = False
+         self.merged_adapters = []
+         self.is_feedforward = is_feedforward
+
+         base_layer = self.get_base_layer()
+         if isinstance(base_layer, nn.Linear):
+             in_features, out_features = base_layer.in_features, base_layer.out_features
+         elif isinstance(base_layer, nn.Conv2d):
+             in_features, out_features = base_layer.in_channels, base_layer.out_channels
+         elif isinstance(base_layer, nn.Embedding):
+             in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim
+         elif isinstance(base_layer, Conv1D):
+             in_features, out_features = (
+                 base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
+             )
+         else:
+             raise ValueError(f"Unsupported layer type {type(base_layer)}")
+         self.in_features = in_features
+         self.out_features = out_features
+
+     def update_layer(self, adapter_name, init_ia3_weights):
+         # This code works for linear layers, override for other layer types
+         # Actual trainable parameters
+         if self.is_feedforward:
+             weight = torch.randn((1, self.in_features))
+         else:
+             weight = torch.randn((self.out_features, 1))
+         self.ia3_l[adapter_name] = nn.Parameter(weight)
+         if init_ia3_weights:
+             self.reset_ia3_parameters(adapter_name)
+         self.to(self.get_base_layer().weight.device)
+         self.set_adapter(self.active_adapters)
+
+     def reset_ia3_parameters(self, adapter_name):
+         if adapter_name in self.ia3_l.keys():
+             # initialize learned vector with torch.ones
+             nn.init.constant_(self.ia3_l[adapter_name], 1.0)
+
72
+
73
+ class Linear(nn.Module, IA3Layer):
74
+ # (IA)^3 implemented in a dense layer
75
+ def __init__(
76
+ self,
77
+ base_layer: nn.Module,
78
+ adapter_name: str,
79
+ fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
80
+ is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer
81
+ is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. useful while unloading later
82
+ init_ia3_weights: bool = True, # whether to initialize IA3 weights
83
+ **kwargs,
84
+ ) -> None:
85
+ super().__init__()
86
+ IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
87
+ self.fan_in_fan_out = fan_in_fan_out
88
+ self.is_target_conv_1d_layer = is_target_conv_1d_layer
89
+ self._active_adapter = adapter_name
90
+ self.update_layer(adapter_name, init_ia3_weights)
91
+
92
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
93
+ """
94
+ Merge the active adapter weights into the base weights
95
+
96
+ Args:
97
+ safe_merge (`bool`, *optional*):
98
+ If True, the merge operation will be performed in a copy of the original weights and check for NaNs
99
+ before merging the weights. This is useful if you want to check if the merge operation will produce
100
+ NaNs. Defaults to `False`.
101
+ adapter_names (`List[str]`, *optional*):
102
+ The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
103
+ to `None`.
104
+ """
105
+ adapter_names = check_adapters_to_merge(self, adapter_names)
106
+ if not adapter_names:
107
+ # no adapter to merge
108
+ return
109
+
110
+ for active_adapter in adapter_names:
111
+ if active_adapter in self.ia3_l.keys():
112
+ base_layer = self.get_base_layer()
113
+ ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out)
114
+ if safe_merge:
115
+ orig_weights = base_layer.weight.data
116
+ orig_weights = torch.mul(orig_weights, ia3_l)
117
+
118
+ if not torch.isfinite(orig_weights).all():
119
+ raise ValueError(
120
+ f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
121
+ )
122
+ base_layer.weight.data = orig_weights
123
+ else:
124
+ base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l)
125
+
126
+ if not self.is_feedforward and (base_layer.bias is not None):
127
+ scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
128
+ base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
129
+
130
+ self.merged_adapters.append(active_adapter)
131
+
132
+ def unmerge(self) -> None:
133
+ """
134
+ This method unmerges all merged adapter layers from the base weights.
135
+ """
136
+ if not self.merged:
137
+ warnings.warn("Already unmerged. Nothing to do.")
138
+ return
139
+
140
+ warnings.warn("Unmerge result can be inaccurate for (IA)^3.")
141
+ while len(self.merged_adapters) > 0:
142
+ active_adapter = self.merged_adapters.pop()
143
+ if active_adapter in self.ia3_l.keys():
144
+ base_layer = self.get_base_layer()
145
+ # Add tolerace to avoid division by zero
146
+ ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8
147
+ base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l)
148
+
149
+ if not self.is_feedforward and (base_layer.bias is not None):
150
+ scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
151
+ base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8)
152
+
153
+ def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
154
+ dtype = previous_dtype = x.dtype
155
+
156
+ if self.disable_adapters:
157
+ if self.merged:
158
+ self.unmerge()
159
+ result = self.base_layer(x, *args, **kwargs)
160
+ elif self.merged:
161
+ result = self.base_layer(x, *args, **kwargs)
162
+ else:
163
+ ia3_scaling = 1
164
+ for active_adapter in self.active_adapters:
165
+ if active_adapter not in self.ia3_l.keys():
166
+ continue
167
+ dtype = self.ia3_l[active_adapter].dtype
168
+ ia3_scaling *= self.ia3_l[active_adapter].flatten()
169
+
170
+ if self.is_feedforward:
171
+ x = x.to(dtype)
172
+ # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype
173
+ # e.g. bf16 vs fp32. Is that okay?
174
+ interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype)
175
+ result = self.base_layer(interm, *args, **kwargs)
176
+ else:
177
+ result = self.base_layer(x, *args, **kwargs)
178
+ result = result.to(dtype) * ia3_scaling
179
+
180
+ result = result.to(previous_dtype)
181
+ return result
182
+
183
+
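The `Conv2d` variant below follows the same recipe, but stores the vector as `(1, C, 1, 1)` so it broadcasts across spatial positions; for non-feedforward merging, the vector has to be permuted onto the output-channel axis of the `(out, in, kh, kw)` weight. A quick shape check (sizes assumed):

```py
import torch

weight = torch.randn(8, 3, 3, 3)  # conv weight: (out_channels, in_channels, kh, kw)
ia3_out = torch.ones(1, 8, 1, 1)  # stored non-feedforward vector: (1, out_channels, 1, 1)

# permute moves the scale onto dim 0 so it broadcasts over (out, in, kh, kw)
scaled = weight * ia3_out.permute(1, 0, 2, 3)
assert scaled.shape == weight.shape
```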
+ class Conv2d(nn.Module, IA3Layer):
+     def __init__(
+         self,
+         base_layer: nn.Module,
+         adapter_name: str,
+         fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
+         is_feedforward: bool = False,  # Set to True if the layer is treated as a feedforward layer
+         init_ia3_weights: bool = True,
+         **kwargs,
+     ) -> None:
+         super().__init__()
+         IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
+         self.fan_in_fan_out = fan_in_fan_out
+         self._active_adapter = adapter_name
+
+         self.update_layer(adapter_name, init_ia3_weights)
+
+     def update_layer(self, adapter_name, init_ia3_weights):
+         # Actual trainable parameters
+         if self.is_feedforward:
+             weight = torch.randn((1, self.in_features, 1, 1))
+         else:
+             weight = torch.randn((1, self.out_features, 1, 1))
+         self.ia3_l[adapter_name] = nn.Parameter(weight)
+         if init_ia3_weights:
+             self.reset_ia3_parameters(adapter_name)
+         self.to(self.get_base_layer().weight.device)
+         self.set_adapter(self.active_adapters)
+
+     def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
+         """
+         Merge the active adapter weights into the base weights
+
+         Args:
+             safe_merge (`bool`, *optional*):
+                 If True, the merge operation will be performed in a copy of the original weights and check for
+                 NaNs before merging the weights. This is useful if you want to check if the merge operation will
+                 produce NaNs. Defaults to `False`.
+             adapter_names (`List[str]`, *optional*):
+                 The list of adapter names that should be merged. If None, all active adapters will be merged.
+                 Defaults to `None`.
+         """
+         adapter_names = check_adapters_to_merge(self, adapter_names)
+         if not adapter_names:
+             # no adapter to merge
+             return
+
+         for active_adapter in adapter_names:
+             if active_adapter in self.ia3_l.keys():
+                 base_layer = self.get_base_layer()
+                 ia3_scaling = self.ia3_l[active_adapter].data
+                 if not self.is_feedforward:
+                     ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
+
+                 if safe_merge:
+                     output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone()
+
+                     if not torch.isfinite(output_weight).all():
+                         raise ValueError(
+                             f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
+                         )
+
+                     base_layer.weight.data = output_weight
+                 else:
+                     base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling)
+
+                 if not self.is_feedforward and (base_layer.bias is not None):
+                     scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
+                     base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
+
+                 self.merged_adapters.append(active_adapter)
+
+     def unmerge(self) -> None:
+         """
+         This method unmerges all merged adapter layers from the base weights.
+         """
+         if not self.merged:
+             warnings.warn("Already unmerged. Nothing to do.")
+             return
+
+         warnings.warn("Unmerge result can be inaccurate for (IA)^3.")
+         while len(self.merged_adapters) > 0:
+             active_adapter = self.merged_adapters.pop()
+             if active_adapter in self.ia3_l.keys():
+                 base_layer = self.get_base_layer()
+                 # divide by the (IA)^3 vector. Add tolerance to avoid division by zero
+                 ia3_scaling = self.ia3_l[active_adapter].data
+                 if not self.is_feedforward:
+                     ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
+                 base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8)
+
+                 if not self.is_feedforward and (base_layer.bias is not None):
+                     scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
+                     # note: the bias was multiplied during merge, so it must be divided here
+                     # (the original line used torch.mul, which would not undo the merge)
+                     base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8)
+
+     def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
+         dtype = previous_dtype = x.dtype
+
+         if self.disable_adapters:
+             if self.merged:
+                 self.unmerge()
+             result = self.base_layer(x, *args, **kwargs)
+         elif self.merged:
+             result = self.base_layer(x, *args, **kwargs)
+         else:
+             ia3_scaling = 1
+             for active_adapter in self.active_adapters:
+                 if active_adapter not in self.ia3_l.keys():
+                     continue
+                 dtype = self.ia3_l[active_adapter].dtype
+                 ia3_scaling *= self.ia3_l[active_adapter]
+
+             if self.is_feedforward:
+                 x = x.to(dtype)
+                 # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype
+                 # e.g. bf16 vs fp32. Is that okay?
+                 interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype)
+                 result = self.base_layer(interm, *args, **kwargs)
+             else:
+                 result = self.base_layer(x, *args, **kwargs)
+                 result = result.to(dtype) * ia3_scaling
+
+         result = result.to(previous_dtype)
+         return result
venv/lib/python3.10/site-packages/peft/tuners/ia3/model.py ADDED
@@ -0,0 +1,394 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from __future__ import annotations
+
+ import re
+ import warnings
+ from dataclasses import asdict
+ from enum import Enum
+ from typing import Optional
+
+ import torch
+ from torch import nn
+ from transformers.pytorch_utils import Conv1D
+
+ from peft.import_utils import is_bnb_4bit_available, is_bnb_available
+ from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
+ from peft.utils import (
+     TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
+     TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
+     ModulesToSaveWrapper,
+     _get_submodules,
+ )
+
+ from .layer import Conv2d, IA3Layer, Linear
+
+
+ class IA3Model(BaseTuner):
+     """
+     Creates an Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained
+     transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638
+
+     Args:
+         model ([`~transformers.PreTrainedModel`]): The model to be adapted.
+         config ([`IA3Config`]): The configuration of the (IA)^3 model.
+         adapter_name (`str`): The name of the adapter, defaults to `"default"`.
+
+     Returns:
+         `torch.nn.Module`: The (IA)^3 model.
+
+     Example:
+
+     ```py
+     >>> from transformers import AutoModelForSeq2SeqLM
+     >>> from peft import IA3Config, IA3Model
+
+     >>> config = IA3Config(
+     ...     peft_type="IA3",
+     ...     task_type="SEQ_2_SEQ_LM",
+     ...     target_modules=["k", "v", "wo"],
+     ...     feedforward_modules=["wo"],
+     ... )
+
+     >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+     >>> ia3_model = IA3Model(model, config, "default")
+     ```
+
+     **Attributes**:
+         - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
+         - **peft_config** ([`IA3Config`]): The configuration of the (IA)^3 model.
+     """
+
+     prefix: str = "ia3_"
+
+     def __init__(self, model, config, adapter_name):
+         super().__init__(model, config, adapter_name)
+
+     @staticmethod
+     def _create_new_module(ia3_config, adapter_name, target, **kwargs):
+         # avoid eager bnb import
+         if is_bnb_available():
+             import bitsandbytes as bnb
+
+             from .bnb import Linear8bitLt
+
+         if is_bnb_4bit_available():
+             from .bnb import Linear4bit
+
+         loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
+         loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
+         is_feedforward = kwargs.pop("is_feedforward", False)
+
+         if isinstance(target, BaseTunerLayer):
+             target_base_layer = target.get_base_layer()
+         else:
+             target_base_layer = target
+
+         if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
+             eightbit_kwargs = kwargs.copy()
+             eightbit_kwargs.update(
+                 {
+                     "has_fp16_weights": target_base_layer.state.has_fp16_weights,
+                     "memory_efficient_backward": target_base_layer.state.memory_efficient_backward,
+                     "threshold": target_base_layer.state.threshold,
+                     "index": target_base_layer.index,
+                 }
+             )
+             new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs)
+         elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit):
+             fourbit_kwargs = kwargs.copy()
+             fourbit_kwargs.update(
+                 {
+                     "compute_dtype": target_base_layer.compute_dtype,
+                     "compress_statistics": target_base_layer.weight.compress_statistics,
+                     "quant_type": target_base_layer.weight.quant_type,
+                 }
+             )
+             new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs)
+         elif isinstance(target_base_layer, torch.nn.Conv2d):
+             # check the unwrapped base layer here, consistent with the Linear/Conv1D branches below
+             new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
+         elif isinstance(target_base_layer, torch.nn.Linear):
+             if kwargs["fan_in_fan_out"]:
+                 warnings.warn(
+                     "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
+                     "Setting fan_in_fan_out to False."
+                 )
+                 kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = False
+             new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
+         elif isinstance(target_base_layer, Conv1D):
+             if not kwargs["fan_in_fan_out"]:
+                 warnings.warn(
+                     "fan_in_fan_out is set to False but the target module is `Conv1D`. "
+                     "Setting fan_in_fan_out to True."
+                 )
+                 kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True
+             new_module = Linear(
+                 target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs
+             )
+         else:
+             raise ValueError(
+                 f"Target module {target} is not supported. "
+                 "Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported."
+             )
+         return new_module
+
+     @staticmethod
+     def _check_target_module_exists(ia3_config, key):
+         return check_target_module_exists(ia3_config, key)
+
+     def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
+         for n, p in model.named_parameters():
+             if self.prefix not in n:
+                 p.requires_grad = False
+
+     def _create_and_replace(
+         self,
+         ia3_config,
+         adapter_name,
+         target,
+         target_name,
+         parent,
+         current_key,
+     ):
+         # check if the target module is in feedforward_modules
+         is_feedforward = self._check_target_module_feedforward(ia3_config, current_key)
+
+         kwargs = {
+             "fan_in_fan_out": ia3_config.fan_in_fan_out,
+             "init_ia3_weights": ia3_config.init_ia3_weights,
+             "is_feedforward": is_feedforward,
+             "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
+             "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
+         }
+
+         if isinstance(target, IA3Layer):
+             target.update_layer(
+                 adapter_name,
+                 ia3_config.init_ia3_weights,
+             )
+         else:
+             new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs)
+             if adapter_name != self.active_adapter:
+                 # adding an additional adapter: it is not automatically trainable
+                 new_module.requires_grad_(False)
+             self._replace_module(parent, target_name, new_module, target)
+
+     @staticmethod
+     def _check_target_module_feedforward(ia3_config, key) -> bool:
+         """
+         A private helper method that checks whether the target module `key` matches a feedforward module specified
+         in `ia3_config`
+         """
+         if isinstance(ia3_config.feedforward_modules, str):
+             is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key))
+         else:
+             is_feedforward = any(key.endswith(target_key) for target_key in ia3_config.feedforward_modules)
+         return is_feedforward
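The helper above mirrors the two config forms: a string is treated as a full-match regex over the module key, while a list falls back to suffix matching. For instance (the key and pattern are made up for illustration):

```py
import re

key = "decoder.block.0.layer.2.DenseReluDense.wo"

# string config -> re.fullmatch against the whole key
assert re.fullmatch(r".*DenseReluDense\.(wi|wo)$", key)

# list config -> endswith on any entry
assert any(key.endswith(suffix) for suffix in ["wo"])
```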
+
+     def _replace_module(self, parent, child_name, new_module, child):
+         setattr(parent, child_name, new_module)
+
+         # child layer wraps the original module, unpack it
+         if hasattr(child, "base_layer"):
+             child = child.base_layer
+
+         # layers with base_layer don't need the weight to be copied, as they have a reference already
+         if not hasattr(new_module, "base_layer"):
+             new_module.weight = child.weight
+             if hasattr(child, "bias"):
+                 new_module.bias = child.bias
+
+         if getattr(child, "state", None) is not None:
+             if hasattr(new_module, "base_layer"):
+                 new_module.base_layer.state = child.state
+             else:
+                 new_module.state = child.state
+             new_module.to(child.weight.device)
+
+         # dispatch to correct device
+         for name, module in new_module.named_modules():
+             if self.prefix in name:
+                 module.to(child.weight.device)
+
+     def __getattr__(self, name: str):
+         """Forward missing attributes to the wrapped module."""
+         try:
+             return super().__getattr__(name)  # defer to nn.Module's logic
+         except AttributeError:
+             return getattr(self.model, name)
+
+     def get_peft_config_as_dict(self, inference: bool = False):
+         config_dict = {}
+         for key, value in self.peft_config.items():
+             config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
+             if inference:
+                 config["inference_mode"] = True
+             config_dict[key] = config
+         # return the full mapping, not just the last config processed
+         return config_dict
+
+     def _set_adapter_layers(self, enabled=True):
+         for module in self.model.modules():
+             if isinstance(module, (IA3Layer, ModulesToSaveWrapper)):
+                 module.enable_adapters(enabled)
+
+     def enable_adapter_layers(self) -> None:
+         """Enable all adapters.
+
+         Call this if you have previously disabled all adapters and want to re-enable them.
+         """
+         self._set_adapter_layers(enabled=True)
+
+     def disable_adapter_layers(self) -> None:
+         """Disable all adapters.
+
+         When disabling all adapters, the model output corresponds to the output of the base model.
+         """
+         self._set_adapter_layers(enabled=False)
+
+     def set_adapter(self, adapter_name: str | list[str]) -> None:
+         """Set the active adapter(s).
+
+         Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If
+         this is not desired, use the following code.
+
+         ```py
+         >>> for name, param in model_peft.named_parameters():
+         ...     if ...:  # some check on name (ex. if 'lora' in name)
+         ...         param.requires_grad = False
+         ```
+
+         Args:
+             adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
+         """
+         for module in self.model.modules():
+             if isinstance(module, IA3Layer):
+                 if module.merged:
+                     warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
+                     module.unmerge()
+                 module.set_adapter(adapter_name)
+
+     def _prepare_adapter_config(self, peft_config, model_config):
+         if peft_config.target_modules is None:
+             if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING:
+                 raise ValueError("Please specify `target_modules` in `peft_config`")
+             peft_config.target_modules = TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[
+                 model_config["model_type"]
+             ]
+         if peft_config.feedforward_modules is None:
+             if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING:
+                 raise ValueError("Please specify `feedforward_modules` in `peft_config`")
+             peft_config.feedforward_modules = TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[
+                 model_config["model_type"]
+             ]
+         return peft_config
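In practice this class is rarely instantiated directly; `get_peft_model` builds it from a config, and `_prepare_adapter_config` fills in `target_modules`/`feedforward_modules` from the mappings when the architecture is known. A sketch for a T5 checkpoint (the model name is illustrative):

```py
from transformers import AutoModelForSeq2SeqLM
from peft import IA3Config, get_peft_model

model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# t5 is a known architecture, so target/feedforward modules can be inferred
peft_model = get_peft_model(model, IA3Config(task_type="SEQ_2_SEQ_LM"))
peft_model.print_trainable_parameters()  # only the ia3_l vectors are trainable
```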
+
+     def _unload_and_optionally_merge(
+         self, merge: bool = True, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
+     ):
+         r"""
+         This method merges the (IA)^3 layers into the base model. This is needed if someone wants to use the base
+         model as a standalone model.
+
+         Args:
+             safe_merge (`bool`, *optional*, defaults to `False`):
+                 If True, the merge operation will be performed in a copy of the original weights and check for
+                 NaNs before merging the weights. This is useful if you want to check if the merge operation will
+                 produce NaNs. Defaults to `False`.
+             adapter_names (`List[str]`, *optional*):
+                 The list of adapter names that should be merged. If None, all active adapters will be merged.
+                 Defaults to `None`.
+         """
+         if getattr(self.model, "is_loaded_in_8bit", False):
+             raise ValueError("Cannot merge ia3 layers when the model is loaded in 8-bit mode")
+
+         if getattr(self.model, "is_loaded_in_4bit", False):
+             raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode")
+
+         self._unloading_checks(adapter_names)
+         key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
+         for key in key_list:
+             try:
+                 parent, target, target_name = _get_submodules(self.model, key)
+             except AttributeError:
+                 continue
+
+             if hasattr(target, "base_layer"):
+                 if merge:
+                     target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
+                 self._replace_module(parent, target_name, target.get_base_layer(), target)
+             elif isinstance(target, ModulesToSaveWrapper):
+                 # save any additional trainable modules part of `modules_to_save`
+                 new_module = target.modules_to_save[target.active_adapter]
+                 if hasattr(new_module, "base_layer"):
+                     # check if the module is itself a tuner layer
+                     if merge:
+                         new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
+                     new_module = new_module.get_base_layer()
+                 setattr(parent, target_name, new_module)
+
+         return self.model
+
+     def merge_and_unload(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> torch.nn.Module:
+         r"""
+         This method merges the IA³ layers into the base model. This is needed if someone wants to use the base
+         model as a standalone model.
+
+         Args:
+             safe_merge (`bool`):
+                 whether to activate the safe merging check to check if there is any potential NaN in the adapter
+                 weights
+             adapter_names (`List[str]`, *optional*):
+                 The list of adapter names that should be merged. If None, all active adapters will be merged.
+                 Defaults to `None`.
+
+         Example:
+
+         ```py
+         >>> from transformers import AutoModelForCausalLM
+         >>> from peft import PeftModel
+
+         >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
+         >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
+         >>> model = PeftModel.from_pretrained(base_model, peft_model_id)
+         >>> merged_model = model.merge_and_unload()
+         ```
+         """
+         return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names)
+
+     def unload(self) -> torch.nn.Module:
+         """
+         Gets back the base model by removing all the IA³ modules without merging. This gives back the original
+         base model.
+         """
+         return self._unload_and_optionally_merge(merge=False)
+
+     def delete_adapter(self, adapter_name: str) -> None:
+         """
+         Deletes an existing adapter.
+
+         Args:
+             adapter_name (str): Name of the adapter to be deleted.
+         """
+         if adapter_name not in self.peft_config:
+             raise ValueError(f"Adapter {adapter_name} does not exist")
+         del self.peft_config[adapter_name]
+
+         key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
+         new_adapter = None
+         for key in key_list:
+             _, target, _ = _get_submodules(self.model, key)
+             if isinstance(target, IA3Layer):
+                 target.delete_adapter(adapter_name)
+                 if new_adapter is None:
+                     new_adapter = target.active_adapters[:]
+
+         self.active_adapter = new_adapter or []
venv/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py ADDED
@@ -0,0 +1,428 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from __future__ import annotations
+
+ import warnings
+ from abc import abstractmethod
+ from dataclasses import dataclass, field
+ from typing import Any, Optional, Union
+
+ import torch
+ import torch.nn as nn
+ from tqdm import tqdm
+
+ from peft.config import PeftConfig
+ from peft.utils import (
+     ModulesToSaveWrapper,
+     _get_submodules,
+ )
+
+ from .tuners_utils import BaseTuner, BaseTunerLayer, check_adapters_to_merge, check_target_module_exists
+
+
+ @dataclass
+ class LycorisConfig(PeftConfig):
+     r"""
+     A base config for LyCORIS-like adapters
+     """
+
+     rank_pattern: Optional[dict] = field(
+         default_factory=dict,
+         metadata={
+             "help": (
+                 "The mapping from layer names or regex expressions to ranks which are different from the default "
+                 "rank specified by `r`. For example, `{'model.decoder.layers.0.encoder_attn.k_proj': 8}`"
+             )
+         },
+     )
+     alpha_pattern: Optional[dict] = field(
+         default_factory=dict,
+         metadata={
+             "help": (
+                 "The mapping from layer names or regex expressions to alphas which are different from the default "
+                 "alpha specified by `alpha`. For example, `{'model.decoder.layers.0.encoder_attn.k_proj': 32}`"
+             )
+         },
+     )
+
+
+ class LycorisLayer(BaseTunerLayer):
+     r"""
+     A base layer for LyCORIS-like adapters
+     """
+
+     # adapter_layer_names needs to be defined on the child class
+     other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout")
+
+     def __init__(self, base_layer: nn.Module) -> None:
+         self.base_layer = base_layer
+         self.r = {}
+         self.alpha = {}
+         self.scaling = {}
+         self.rank_dropout = {}
+         self.module_dropout = {}
+
+         # Tuner info
+         self._disable_adapters = False
+         self.merged_adapters = []
+
+     @property
+     @abstractmethod
+     def _available_adapters(self) -> set[str]:
+         ...
+
+     def _init_empty_weights(self, cls, *args, **kwargs) -> None:
+         # A helper method that initializes the layer of the given class without spending time on initializing the
+         # model weights. The implementation is inspired by
+         # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but that function cannot be used
+         # directly. Instead of this approach, it would be possible to bypass the __init__ of the class, but that
+         # runs the risk of omitting important logic inside that __init__.
+         kwargs = kwargs.copy()
+         final_device = kwargs.pop("device", "cpu")
+         cls.__init__(self, *args, device="meta", **kwargs)
+         self.to_empty(device=final_device)
+
+     @abstractmethod
+     def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs):
+         ...
+
+     # TODO: refactor LoRA to use the same approach
+     @abstractmethod
+     def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
+         """Activations added on top of the base layer output (i.e. after the base layer forward pass)"""
+
+     @abstractmethod
+     def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
+         ...
+
+     def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
+         """
+         Merge the active adapter weights into the base weights
+
+         Args:
+             safe_merge (`bool`, *optional*):
+                 If `True`, the merge operation will be performed in a copy of the original weights and check for
+                 NaNs before merging the weights. This is useful if you want to check if the merge operation will
+                 produce NaNs. Defaults to `False`.
+             adapter_names (`List[str]`, *optional*):
+                 The list of adapter names that should be merged. If `None`, all active adapters will be merged.
+                 Defaults to `None`.
+         """
+         adapter_names = check_adapters_to_merge(self, adapter_names)
+         if not adapter_names:
+             # no adapter to merge
+             return
+
+         for active_adapter in adapter_names:
+             if active_adapter in self._available_adapters:
+                 base_layer = self.get_base_layer()
+                 if safe_merge:
+                     orig_weights = base_layer.weight.data.clone()
+                     orig_weights += self.get_delta_weight(active_adapter)
+
+                     if not torch.isfinite(orig_weights).all():
+                         raise ValueError(
+                             f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
+                         )
+
+                     base_layer.weight.data = orig_weights
+                 else:
+                     base_layer.weight.data += self.get_delta_weight(active_adapter)
+                 self.merged_adapters.append(active_adapter)
+
+     @abstractmethod
+     def reset_adapter_parameters(self, adapter_name: str):
+         ...
+
+     def set_scale(self, adapter, scale):
+         if adapter not in self._available_adapters:
+             # Ignore the case where the adapter is not in the layer
+             return
+         self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter]
+
+     def scale_layer(self, scale: float) -> None:
+         if scale == 1:
+             return
+
+         for active_adapter in self.active_adapters:
+             if active_adapter not in self._available_adapters:
+                 continue
+
+             self.scaling[active_adapter] *= scale
+
+     def unmerge(self) -> None:
+         """
+         This method unmerges all merged adapter layers from the base weights.
+         """
+         if not self.merged:
+             warnings.warn("Already unmerged. Nothing to do.")
+             return
+         while len(self.merged_adapters) > 0:
+             active_adapter = self.merged_adapters.pop()
+             if active_adapter in self._available_adapters:
+                 self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
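Unlike the multiplicative (IA)³ merge earlier in this diff, LyCORIS merging is additive, so `unmerge` can subtract the exact delta back out. A sketch of the arithmetic, with a random tensor standing in for `get_delta_weight`:

```py
import torch

w = torch.randn(8, 16)
delta = 0.01 * torch.randn(8, 16)  # stand-in for get_delta_weight(adapter)

merged = w + delta         # merge: add the adapter delta
restored = merged - delta  # unmerge: subtract it again

assert torch.allclose(restored, w)
```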
+
+     def unscale_layer(self, scale=None) -> None:
+         for active_adapter in self.active_adapters:
+             if active_adapter not in self._available_adapters:
+                 continue
+
+             if scale is None:
+                 self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter]
+             else:
+                 self.scaling[active_adapter] /= scale
+
+     @abstractmethod
+     def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs):
+         ...
+
+
+ class LycorisTuner(BaseTuner):
+     r"""
+     A base tuner for LyCORIS-like adapters
+     """
+
+     prefix: str
+     layers_mapping: dict[type[torch.nn.Module], type[LycorisLayer]]
+
+     def __init__(self, model, config, adapter_name):
+         super().__init__(model, config, adapter_name)
+
+     def __getattr__(self, name: str):
+         """Forward missing attributes to the wrapped module."""
+         try:
+             return super().__getattr__(name)  # defer to nn.Module's logic
+         except AttributeError:
+             return getattr(self.model, name)
+
+     @staticmethod
+     def _check_target_module_exists(config, key):
+         return check_target_module_exists(config, key)
+
+     @abstractmethod
+     def _create_and_replace(
+         self,
+         config: LycorisConfig,
+         adapter_name: str,
+         target: Union[LycorisLayer, nn.Module],
+         target_name,
+         parent,
+         current_key,
+     ):
+         ...
+
+     @classmethod
+     def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer:
+         # Find the corresponding subtype of the provided target module
+         new_module_cls = None
+         for subtype, target_cls in cls.layers_mapping.items():
+             if (
+                 hasattr(target, "base_layer")
+                 and isinstance(target.get_base_layer(), subtype)
+                 and isinstance(target, BaseTunerLayer)
+             ):
+                 # nested tuner layers are allowed
+                 new_module_cls = target_cls
+                 break
+             elif isinstance(target, subtype):
+                 new_module_cls = target_cls
+                 break
+
+         # no corresponding type was found, so an adapter for this layer is not supported
+         if new_module_cls is None:
+             supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys())
+             raise ValueError(
+                 f"Target module of type {type(target)} not supported, "
+                 f"currently only adapters for {supported_modules} are supported"
+             )
+
+         if isinstance(target, BaseTunerLayer):
+             target_base_layer = target.get_base_layer()
+         else:
+             target_base_layer = target
+
+         if isinstance(target_base_layer, torch.nn.Conv2d):
+             new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
+         elif isinstance(target_base_layer, torch.nn.Linear):
+             new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
+         else:
+             supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys())
+             raise ValueError(
+                 f"Target module of type {type(target)} not supported, "
+                 f"currently only adapters for {supported_modules} are supported"
+             )
+
+         return new_module
+
+     def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
+         for n, p in model.named_parameters():
+             if self.prefix not in n:
+                 p.requires_grad = False
+
+     @staticmethod
+     def _prepare_adapter_config(peft_config, model_config):
+         if peft_config.target_modules is None:
+             raise ValueError("Please specify `target_modules` in `peft_config`")
+         return peft_config
+
+     def _replace_module(self, parent, child_name, new_module, child):
+         setattr(parent, child_name, new_module)
+         # It's not necessary to set requires_grad here, as that is handled by
+         # _mark_only_adapters_as_trainable
+
+         if not hasattr(new_module, "base_layer"):
+             new_module.weight = child.weight
+             if hasattr(child, "bias"):
+                 new_module.bias = child.bias
+
+         if getattr(child, "state", None) is not None:
+             if hasattr(new_module, "base_layer"):
+                 new_module.base_layer.state = child.state
+             else:
+                 new_module.state = child.state
+             new_module.to(child.weight.device)
+
+         # dispatch to correct device
+         for name, module in new_module.named_modules():
+             if self.prefix in name:
+                 module.to(child.weight.device)
+
+     def _set_adapter_layers(self, enabled=True):
+         for module in self.model.modules():
+             if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
+                 module.enable_adapters(enabled)
+
+     def _unload_and_optionally_merge(
+         self,
+         merge: bool = True,
+         progressbar: bool = False,
+         safe_merge: bool = False,
+         adapter_names: Optional[list[str]] = None,
+     ):
+         if merge:
+             if getattr(self.model, "quantization_method", None) == "gptq":
+                 raise ValueError("Cannot merge LyCORIS layers when the model is gptq quantized")
+
+         self._unloading_checks(adapter_names)
+         key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
+         desc = "Unloading " + ("and merging " if merge else "") + "model"
+         for key in tqdm(key_list, disable=not progressbar, desc=desc):
+             try:
+                 parent, target, target_name = _get_submodules(self.model, key)
+             except AttributeError:
+                 continue
+
+             if hasattr(target, "base_layer"):
+                 if merge:
+                     target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
+                 self._replace_module(parent, target_name, target.get_base_layer(), target)
+             elif isinstance(target, ModulesToSaveWrapper):
+                 # save any additional trainable modules part of `modules_to_save`
+                 new_module = target.modules_to_save[target.active_adapter]
+                 if hasattr(new_module, "base_layer"):
+                     # check if the module is itself a tuner layer
+                     if merge:
+                         new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
+                     new_module = new_module.get_base_layer()
+                 setattr(parent, target_name, new_module)
+
+         return self.model
+
+     def enable_adapter_layers(self) -> None:
+         """Enable all adapters.
+
+         Call this if you have previously disabled all adapters and want to re-enable them.
+         """
+         self._set_adapter_layers(enabled=True)
+
+     def disable_adapter_layers(self) -> None:
+         """Disable all adapters.
+
+         When disabling all adapters, the model output corresponds to the output of the base model.
+         """
+         self._set_adapter_layers(enabled=False)
+
+     def merge_and_unload(
+         self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
+     ) -> torch.nn.Module:
+         r"""
+         This method merges the adapter layers into the base model. This is needed if someone wants to use the base
+         model as a standalone model.
+
+         Args:
+             progressbar (`bool`):
+                 whether to show a progressbar indicating the unload and merge process
+             safe_merge (`bool`):
+                 whether to activate the safe merging check to check if there is any potential NaN in the adapter
+                 weights
+             adapter_names (`List[str]`, *optional*):
+                 The list of adapter names that should be merged. If None, all active adapters will be merged.
+                 Defaults to `None`.
+         """
+         return self._unload_and_optionally_merge(
+             progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
+         )
+
+     def unload(self) -> torch.nn.Module:
+         """
+         Gets back the base model by removing all the adapter modules without merging. This gives back the original
+         base model.
+         """
+         return self._unload_and_optionally_merge(merge=False)
+
+     def set_adapter(self, adapter_name: str | list[str]) -> None:
+         """Set the active adapter(s).
+
+         Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If
+         this is not desired, use the following code.
+
+         ```py
+         >>> for name, param in model_peft.named_parameters():
+         ...     if ...:  # some check on name (ex. if 'lora' in name)
+         ...         param.requires_grad = False
+         ```
+
+         Args:
+             adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
+         """
+         for module in self.model.modules():
+             if isinstance(module, LycorisLayer):
+                 if module.merged:
+                     warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
+                     module.unmerge()
+                 module.set_adapter(adapter_name)
+
+     def delete_adapter(self, adapter_name: str) -> None:
+         """
+         Deletes an existing adapter.
+
+         Args:
+             adapter_name (`str`): Name of the adapter to be deleted.
+         """
+         if adapter_name not in list(self.peft_config.keys()):
+             raise ValueError(f"Adapter {adapter_name} does not exist")
+         del self.peft_config[adapter_name]
+
+         key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
+         new_adapter = None
+         for key in key_list:
+             _, target, _ = _get_submodules(self.model, key)
+             if isinstance(target, LycorisLayer):
+                 target.delete_adapter(adapter_name)
+                 if new_adapter is None:
+                     new_adapter = target.active_adapters[:]
+
+         self.active_adapter = new_adapter or []
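Concrete tuners plug into this base class mainly through `prefix` and `layers_mapping`; `_create_new_module` then walks that mapping to pick the adapter class for each target. A simplified, runnable rendition of that lookup (string stand-ins replace the real layer classes):

```py
import torch.nn as nn

# how _create_new_module picks a layer class from layers_mapping (simplified)
layers_mapping = {nn.Conv2d: "LyCORIS-Conv2d", nn.Linear: "LyCORIS-Linear"}

target = nn.Linear(16, 8)
new_module_cls = next(
    (cls for subtype, cls in layers_mapping.items() if isinstance(target, subtype)),
    None,
)
assert new_module_cls == "LyCORIS-Linear"
```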
venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py ADDED
@@ -0,0 +1,19 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
+ from .model import MultitaskPromptEmbedding
+
+
+ __all__ = ["MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "MultitaskPromptEmbedding"]
venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (387 Bytes)
venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.84 kB)
venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc ADDED
Binary file (2.42 kB)
venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py ADDED
@@ -0,0 +1,61 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import enum
+ from dataclasses import dataclass, field
+ from typing import Optional, Union
+
+ from peft.tuners.prompt_tuning import PromptTuningConfig
+ from peft.utils import PeftType
+
+
+ class MultitaskPromptTuningInit(str, enum.Enum):
+     # initialize prompt with text
+     TEXT = "TEXT"
+     # initialize prompt with a random matrix
+     RANDOM = "RANDOM"
+     # average the prefix and column matrices obtained during source training
+     AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
+     # pick the prefix and column matrices of a particular task obtained during source training
+     EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
+     # only use the prompt embeddings trained during source training
+     ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"
+
+
+ @dataclass
+ class MultitaskPromptTuningConfig(PromptTuningConfig):
+     prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
+         default=MultitaskPromptTuningInit.RANDOM,
+         metadata={
+             "help": (
+                 "How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
+                 "EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
+             ),
+         },
+     )
+     prompt_tuning_init_state_dict_path: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "The path of the source state dict. This is required when training the downstream target prompt "
+                 "from the pretrained source prompt."
+             ),
+         },
+     )
+     prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
+     num_ranks: Optional[int] = field(default=1, metadata={"help": "number of ranks of the task-specific low-rank factors"})
+     num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})
+
+     def __post_init__(self):
+         self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
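A construction sketch for this config; the values are illustrative, and `RANDOM` init avoids the state-dict requirement enforced for the source-task init modes:

```py
from peft import MultitaskPromptTuningConfig, MultitaskPromptTuningInit

config = MultitaskPromptTuningConfig(
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=50,
    num_tasks=4,
    num_ranks=1,
    prompt_tuning_init=MultitaskPromptTuningInit.RANDOM,
)
# AVERAGE_SOURCE_TASKS / EXACT_SOURCE_TASK / ONLY_SOURCE_SHARED additionally
# require prompt_tuning_init_state_dict_path to point at the source checkpoint
```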
venv/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import torch
+
+ from peft.tuners.prompt_tuning import PromptEmbedding
+ from peft.utils import TaskType
+
+ from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
+
+
+ # This code is adapted for the paper: https://arxiv.org/abs/2303.02861 and
+ # constitutes the work done at MIT-IBM Watson Research Lab.
+
+
+ class MultitaskPromptEmbedding(PromptEmbedding):
+     def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings):
+         super().__init__(config, word_embeddings)
+
+         self.num_tasks = config.num_tasks
+         self.num_ranks = config.num_ranks
+         self.num_virtual_tokens = config.num_virtual_tokens
+
+         self.num_transformer_submodules = config.num_transformer_submodules
+         if self.num_transformer_submodules is None:
+             self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
+
+         self.token_dim = config.token_dim
+
+         total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules
+
+         self.prefix_task_cols = torch.nn.Parameter(
+             torch.normal(
+                 mean=0,
+                 std=0.02,
+                 size=(self.num_tasks, total_virtual_tokens, self.num_ranks),
+             )
+         )
+         self.prefix_task_rows = torch.nn.Parameter(
+             torch.normal(
+                 mean=0,
+                 std=0.02,
+                 size=(self.num_tasks, self.num_ranks, self.token_dim),
+             )
+         )
+
+         if config.prompt_tuning_init in [
+             MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
+             MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
+             MultitaskPromptTuningInit.ONLY_SOURCE_SHARED,
+         ]:
+             if config.prompt_tuning_init_state_dict_path is None:
+                 raise ValueError(
+                     f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} "
+                     "init method"
+                 )
+
+             # TODO: There should be an option for safetensors
+             state_dict: dict = torch.load(
+                 config.prompt_tuning_init_state_dict_path,
+                 map_location=word_embeddings.weight.device,
+             )
+
+         if config.prompt_tuning_init in [
+             MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
+             MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
+         ]:
+             prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"]
+             prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"]
+
+             if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS:
+                 prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True)
+                 prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True)
+             elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK:
+                 prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0)
+                 prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0)
+
+             state_dict = {
+                 "embedding.weight": state_dict["prompt_embeddings"],
+                 "prefix_task_cols": prefix_task_cols_,
+                 "prefix_task_rows": prefix_task_rows_,
+             }
+
+             self.load_state_dict(state_dict, strict=True)
+         elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED:
+             state_dict = {
+                 "embedding.weight": state_dict["prompt_embeddings"],
+             }
+
+             self.load_state_dict(state_dict, strict=False)
+
+     def forward(self, indices, task_ids):
+         if task_ids is None:
+             raise ValueError("task_ids cannot be None")
+
+         prompt_embeddings = self.embedding(indices)
+
+         task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids)
+         task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids)
+         task_prompts = torch.matmul(task_cols, task_rows)
+
+         prompt_embeddings *= task_prompts
+
+         return prompt_embeddings
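
The forward pass above composes each per-task prompt as a rank-`num_ranks` product `cols @ rows` and applies it to the shared prompt with an elementwise (Hadamard) product. A standalone shape check of that rule, with arbitrary illustrative dimensions (not taken from this commit):

# Shape check of the forward() rule: per-task prompt = cols @ rows,
# applied to the shared prompt elementwise. All sizes are illustrative.
import torch

num_tasks, T, r, d = 4, 10, 1, 16          # tasks, total virtual tokens, rank, token dim
shared = torch.randn(2, T, d)              # shared prompt embeddings for a batch of 2
cols = torch.randn(num_tasks, T, r)        # plays the role of prefix_task_cols
rows = torch.randn(num_tasks, r, d)        # plays the role of prefix_task_rows

task_ids = torch.tensor([0, 3])            # one task id per batch element
task_prompts = torch.index_select(cols, 0, task_ids) @ torch.index_select(rows, 0, task_ids)
out = shared * task_prompts                # matches prompt_embeddings *= task_prompts
print(out.shape)                           # torch.Size([2, 10, 16])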
venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py ADDED
@@ -0,0 +1,19 @@
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .config import PrefixTuningConfig
+ from .model import PrefixEncoder
+
+
+ __all__ = ["PrefixTuningConfig", "PrefixEncoder"]
venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (314 Bytes)
 
venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.22 kB)
 
venv/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc ADDED
Binary file (2.29 kB)