applied-ai-018 committed
Commit 5f0de57 · verified · 1 Parent(s): ca23f89

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
Files changed (50)
  1. .gitattributes +1 -0
  2. ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  6. lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-metadata.json +810 -0
  7. venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 +3 -0
  8. venv/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py +0 -0
  9. venv/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py +117 -0
  10. venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py +170 -0
  14. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py +73 -0
  15. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py +107 -0
  21. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py +218 -0
  22. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py +517 -0
  23. venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py +180 -0
  24. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py +97 -0
  25. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py +189 -0
  34. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +121 -0
  35. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py +1122 -0
  36. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py +179 -0
  37. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py +1295 -0
  38. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py +252 -0
  39. venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py +819 -0
  40. venv/lib/python3.10/site-packages/transformers/models/nezha/__init__.py +69 -0
  41. venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/configuration_nezha.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/modeling_nezha.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/nezha/configuration_nezha.py +103 -0
  45. venv/lib/python3.10/site-packages/transformers/models/nezha/modeling_nezha.py +1693 -0
  46. venv/lib/python3.10/site-packages/transformers/models/opt/__init__.py +101 -0
  47. venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/configuration_opt.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/convert_opt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_flax_opt.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -90,3 +90,4 @@ venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs di
  venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0583b3e1b5715c7324a5a96846f984529327ce654fe6e4ff751c55d08a1f4120
+ size 33555612
ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a574574e1cf4f612f36c5a950de7dbd5a2fbe95d3ee30fdceb0e508c94e83eb
+ size 33555533
ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:391ae28bf7845a2699f795228e5c7b365b28a4d85132194dcf94a12eb4f44725
+ size 33555533
ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:299c8182eea4098a354774bfd621db676c0baeae084f0059983af42bbf72f1f7
+ size 50332843
lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-metadata.json ADDED
@@ -0,0 +1,810 @@
+ {
+   "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+   "python": "3.10.12",
+   "heartbeatAt": "2024-05-14T11:44:44.666189",
+   "startedAt": "2024-05-14T11:44:44.209643",
+   "docker": null,
+   "cuda": null,
+   "args": [
+     "--model",
+     "hf",
+     "--model_args",
+     "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100",
+     "--tasks",
+     "indiccopa-hi",
+     "--batch_size",
+     "auto",
+     "--wandb_args",
+     "project=bharatgpt,group=global_step100"
+   ],
+   "state": "running",
+   "program": "-m lm_eval.__main__",
+   "codePathLocal": null,
+   "git": {
+     "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+     "commit": null
+   },
+   "email": null,
+   "root": "/data/cronscript/lm-evaluation-harness",
+   "host": "vizzhy-150-3",
+   "username": "root",
+   "executable": "/usr/bin/python3",
+   "cpu_count": 76,
+   "cpu_count_logical": 152,
+   "cpu_freq": {"current": 3384.3174802631584, "min": 800.0, "max": 3400.0},
+   "cpu_freq_per_core": [
+     {"current": 3251.15, "min": 800.0, "max": 3400.0},
+     {"current": 3251.225, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3236.114, "min": 800.0, "max": 3400.0},
+     {"current": 3285.304, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3285.185, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3223.021, "min": 800.0, "max": 3400.0},
+     {"current": 3223.232, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3291.956, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3229.554, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3148.525, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3285.184, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3221.545, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0}
+   ],
+   "disk": {
+     "/": {"total": 866.4415092468262, "used": 77.77706527709961}
+   },
+   "memory": {"total": 1007.5000267028809}
+ }
venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37d1141111d6b935aa84156ad61fd3575350cad8d69d0916e9db7d37eef9b816
+ size 107473968
venv/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py ADDED
@@ -0,0 +1,117 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Audio/Text processor class for CLAP
+ """
+
+ from ...processing_utils import ProcessorMixin
+ from ...tokenization_utils_base import BatchEncoding
+
+
+ class ClapProcessor(ProcessorMixin):
+     r"""
+     Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBerta tokenizer into a single processor.
+
+     [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
+     [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.
+
+     Args:
+         feature_extractor ([`ClapFeatureExtractor`]):
+             The audio processor is a required input.
+         tokenizer ([`RobertaTokenizerFast`]):
+             The tokenizer is a required input.
+     """
+
+     feature_extractor_class = "ClapFeatureExtractor"
+     tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
+
+     def __init__(self, feature_extractor, tokenizer):
+         super().__init__(feature_extractor, tokenizer)
+
+     def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
+         """
+         Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text`
+         and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to
+         encode the text. To prepare the audio(s), this method forwards the `audios` and `kwrags` arguments to
+         ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the
+         doctsring of the above two methods for more information.
+
+         Args:
+             text (`str`, `List[str]`, `List[List[str]]`):
+                 The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                 (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                 `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+             audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                 The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case
+                 of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels,
+                 and T the sample length of the audio.
+
+             return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                 If set, will return tensors of a particular framework. Acceptable values are:
+
+                 - `'tf'`: Return TensorFlow `tf.constant` objects.
+                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                 - `'np'`: Return NumPy `np.ndarray` objects.
+                 - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+         Returns:
+             [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+             - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+             - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+               `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+               `None`).
+             - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
+         """
+         sampling_rate = kwargs.pop("sampling_rate", None)
+
+         if text is None and audios is None:
+             raise ValueError("You have to specify either text or audios. Both cannot be none.")
+
+         if text is not None:
+             encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
+
+         if audios is not None:
+             audio_features = self.feature_extractor(
+                 audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
+             )
+
+         if text is not None and audios is not None:
+             encoding["input_features"] = audio_features.input_features
+             return encoding
+         elif text is not None:
+             return encoding
+         else:
+             return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
+
+     def batch_decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+         refer to the docstring of this method for more information.
+         """
+         return self.tokenizer.batch_decode(*args, **kwargs)
+
+     def decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
+         to the docstring of this method for more information.
+         """
+         return self.tokenizer.decode(*args, **kwargs)
+
+     @property
+     def model_input_names(self):
+         tokenizer_input_names = self.tokenizer.model_input_names
+         feature_extractor_input_names = self.feature_extractor.model_input_names
+         return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc ADDED
Binary file (6.28 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc ADDED
Binary file (31.4 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py ADDED
@@ -0,0 +1,170 @@
+ # coding=utf-8
+ # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Open-Llama model configuration"""
+
+ from ....configuration_utils import PretrainedConfig
+ from ....utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from .._archive_maps import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class OpenLlamaConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an
+     Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the
+     [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented by
+             the `inputs_ids` passed when calling [`OpenLlamaModel`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings(`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+             these scaling strategies behave:
+             https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+             experimental feature, subject to breaking API changes in future versions.
+
+     Example:
+
+     ```python
+     >>> from transformers import OpenLlamaModel, OpenLlamaConfig
+
+     >>> # Initializing a Open-Llama open_llama-7b style configuration
+     >>> configuration = OpenLlamaConfig()
+
+     >>> # Initializing a model from the open_llama-7b style configuration
+     >>> model = OpenLlamaModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "open-llama"
+
+     def __init__(
+         self,
+         vocab_size=100000,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         use_memory_efficient_attention=True,
+         hidden_dropout_prob=0.1,
+         attention_dropout_prob=0.1,
+         use_stable_embedding=True,
+         shared_input_output_embedding=True,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.use_memory_efficient_attention = kwargs.pop(
+             "use_memorry_efficient_attention", use_memory_efficient_attention
+         )
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_dropout_prob = attention_dropout_prob
+         self.use_stable_embedding = use_stable_embedding
+         self.shared_input_output_embedding = shared_input_output_embedding
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py ADDED
@@ -0,0 +1,73 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+ _import_structure = {
+     "configuration_retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig"],
+     "tokenization_retribert": ["RetriBertTokenizer"],
+ }
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_retribert_fast"] = ["RetriBertTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_retribert"] = [
+         "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "RetriBertModel",
+         "RetriBertPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
+     from .tokenization_retribert import RetriBertTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_retribert_fast import RetriBertTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_retribert import (
+             RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             RetriBertModel,
+             RetriBertPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc ADDED
Binary file (4.59 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc ADDED
Binary file (7.46 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc ADDED
Binary file (17.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc ADDED
Binary file (7 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py ADDED
@@ -0,0 +1,107 @@
+ # coding=utf-8
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ RetriBERT model configuration"""
+
+ from ....configuration_utils import PretrainedConfig
+ from ....utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ from .._archive_maps import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class RetriBertConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a
+     RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the RetriBERT
+     [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 30522):
+             Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by
+             the `inputs_ids` passed when calling [`RetriBertModel`]
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention probabilities.
+         max_position_embeddings (`int`, *optional*, defaults to 512):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         type_vocab_size (`int`, *optional*, defaults to 2):
+             The vocabulary size of the *token_type_ids* passed into [`BertModel`].
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         share_encoders (`bool`, *optional*, defaults to `True`):
+             Whether or not to use the same Bert-type encoder for the queries and document
+         projection_dim (`int`, *optional*, defaults to 128):
+             Final dimension of the query and document representation after projection
+     """
+
+     model_type = "retribert"
+
+     def __init__(
+         self,
+         vocab_size=30522,
+         hidden_size=768,
+         num_hidden_layers=8,
+         num_attention_heads=12,
+         intermediate_size=3072,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.1,
+         attention_probs_dropout_prob=0.1,
+         max_position_embeddings=512,
+         type_vocab_size=2,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         share_encoders=True,
+         projection_dim=128,
+         pad_token_id=0,
+         **kwargs,
+     ):
+         super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_act = hidden_act
+         self.intermediate_size = intermediate_size
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.max_position_embeddings = max_position_embeddings
+         self.type_vocab_size = type_vocab_size
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.share_encoders = share_encoders
+         self.projection_dim = projection_dim
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ RetriBERT model
17
+ """
18
+
19
+
20
+ import math
21
+ from typing import Optional
22
+
23
+ import torch
24
+ import torch.utils.checkpoint as checkpoint
25
+ from torch import nn
26
+
27
+ from ....modeling_utils import PreTrainedModel
28
+ from ....utils import add_start_docstrings, logging
29
+ from ...bert.modeling_bert import BertModel
30
+ from .configuration_retribert import RetriBertConfig
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ from .._archive_maps import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
37
+
38
+
39
+ # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
40
+ class RetriBertPreTrainedModel(PreTrainedModel):
41
+ """
42
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
43
+ models.
44
+ """
45
+
46
+ config_class = RetriBertConfig
47
+ load_tf_weights = None
48
+ base_model_prefix = "retribert"
49
+
50
+ def _init_weights(self, module):
51
+ """Initialize the weights"""
52
+ if isinstance(module, nn.Linear):
53
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
54
+ if module.bias is not None:
55
+ module.bias.data.zero_()
56
+ elif isinstance(module, nn.Embedding):
57
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
58
+ if module.padding_idx is not None:
59
+ module.weight.data[module.padding_idx].zero_()
60
+ elif isinstance(module, nn.LayerNorm):
61
+ module.bias.data.zero_()
62
+ module.weight.data.fill_(1.0)
63
+
64
+
65
+ RETRIBERT_START_DOCSTRING = r"""
66
+
67
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
68
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
69
+ etc.)
70
+
71
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
72
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
73
+ and behavior.
74
+
75
+ Parameters:
76
+ config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
77
+ Initializing with a config file does not load the weights associated with the model, only the
78
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
79
+ """
80
+
81
+
82
+ @add_start_docstrings(
83
+ """Bert Based model to embed queries or document for document retrieval.""",
84
+ RETRIBERT_START_DOCSTRING,
85
+ )
86
+ class RetriBertModel(RetriBertPreTrainedModel):
87
+ def __init__(self, config: RetriBertConfig) -> None:
88
+ super().__init__(config)
89
+ self.projection_dim = config.projection_dim
90
+
91
+ self.bert_query = BertModel(config)
92
+ self.bert_doc = None if config.share_encoders else BertModel(config)
93
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
94
+ self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
95
+ self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
96
+
97
+ self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
98
+
99
+ # Initialize weights and apply final processing
100
+ self.post_init()
101
+
102
+ def embed_sentences_checkpointed(
103
+ self,
104
+ input_ids,
105
+ attention_mask,
106
+ sent_encoder,
107
+ checkpoint_batch_size=-1,
108
+ ):
109
+ # reproduces BERT forward pass with checkpointing
110
+ if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
111
+ return sent_encoder(input_ids, attention_mask=attention_mask)[1]
112
+ else:
113
+ # prepare implicit variables
114
+ device = input_ids.device
115
+ input_shape = input_ids.size()
116
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
117
+ head_mask = [None] * sent_encoder.config.num_hidden_layers
118
+ extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
119
+ attention_mask, input_shape
120
+ )
121
+
122
+ # define function for checkpointing
123
+ def partial_encode(*inputs):
124
+ encoder_outputs = sent_encoder.encoder(
125
+ inputs[0],
126
+ attention_mask=inputs[1],
127
+ head_mask=head_mask,
128
+ )
129
+ sequence_output = encoder_outputs[0]
130
+ pooled_output = sent_encoder.pooler(sequence_output)
131
+ return pooled_output
132
+
133
+ # run embedding layer on everything at once
134
+ embedding_output = sent_encoder.embeddings(
135
+ input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
136
+ )
137
+ # run encoding and pooling on one mini-batch at a time
138
+ pooled_output_list = []
139
+ for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
140
+ b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
141
+ b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
142
+ pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
143
+ pooled_output_list.append(pooled_output)
144
+ return torch.cat(pooled_output_list, dim=0)
145
+
146
+ def embed_questions(
147
+ self,
148
+ input_ids,
149
+ attention_mask=None,
150
+ checkpoint_batch_size=-1,
151
+ ):
152
+ q_reps = self.embed_sentences_checkpointed(
153
+ input_ids,
154
+ attention_mask,
155
+ self.bert_query,
156
+ checkpoint_batch_size,
157
+ )
158
+ return self.project_query(q_reps)
159
+
160
+ def embed_answers(
161
+ self,
162
+ input_ids,
163
+ attention_mask=None,
164
+ checkpoint_batch_size=-1,
165
+ ):
166
+ a_reps = self.embed_sentences_checkpointed(
167
+ input_ids,
168
+ attention_mask,
169
+ self.bert_query if self.bert_doc is None else self.bert_doc,
170
+ checkpoint_batch_size,
171
+ )
172
+ return self.project_doc(a_reps)
173
+
174
+ def forward(
175
+ self,
176
+ input_ids_query: torch.LongTensor,
177
+ attention_mask_query: Optional[torch.FloatTensor],
178
+ input_ids_doc: torch.LongTensor,
179
+ attention_mask_doc: Optional[torch.FloatTensor],
180
+ checkpoint_batch_size: int = -1,
181
+ ) -> torch.FloatTensor:
182
+ r"""
183
+ Args:
184
+ input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
185
+ Indices of input sequence tokens in the vocabulary for the queries in a batch.
186
+
187
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
188
+ [`PreTrainedTokenizer.__call__`] for details.
189
+
190
+ [What are input IDs?](../glossary#input-ids)
191
+ attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
192
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
193
+
194
+ - 1 for tokens that are **not masked**,
195
+ - 0 for tokens that are **masked**.
196
+
197
+ [What are attention masks?](../glossary#attention-mask)
198
+ input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
199
+ Indices of input sequence tokens in the vocabulary for the documents in a batch.
200
+ attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
201
+ Mask to avoid performing attention on documents padding token indices.
202
+ checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
203
+ If greater than 0, uses gradient checkpointing to only compute sequence representation on
204
+ `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
205
+ all document representations in the batch.
206
+
207
+ Return:
208
+ `torch.FloatTensor``: The bidirectional cross-entropy loss obtained while trying to match each query to its
209
+ corresponding document and each document to its corresponding query in the batch
210
+ """
211
+ device = input_ids_query.device
212
+ q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
213
+ a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
214
+ compare_scores = torch.mm(q_reps, a_reps.t())
215
+ loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
216
+ loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
217
+ loss = (loss_qa + loss_aq) / 2
218
+ return loss
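The forward pass above scores every query representation against every document representation in the batch and averages the two directional cross-entropy losses. A minimal standalone sketch of that bidirectional in-batch loss (the tensor shapes and random inputs are illustrative placeholders, not part of this diff):

```python
import torch
import torch.nn as nn

# Illustrative batch of already-projected representations: (batch, proj_dim).
q_reps = torch.randn(8, 128)   # query embeddings
a_reps = torch.randn(8, 128)   # document (answer) embeddings

ce_loss = nn.CrossEntropyLoss(reduction="mean")
compare_scores = torch.mm(q_reps, a_reps.t())      # (batch, batch) similarity matrix
targets = torch.arange(compare_scores.shape[0])    # the i-th query matches the i-th document
loss_qa = ce_loss(compare_scores, targets)         # query -> document direction
loss_aq = ce_loss(compare_scores.t(), targets)     # document -> query direction
loss = (loss_qa + loss_aq) / 2
```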
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py ADDED
@@ -0,0 +1,517 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RetriBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ....utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+
31
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
32
+ def load_vocab(vocab_file):
33
+ """Loads a vocabulary file into a dictionary."""
34
+ vocab = collections.OrderedDict()
35
+ with open(vocab_file, "r", encoding="utf-8") as reader:
36
+ tokens = reader.readlines()
37
+ for index, token in enumerate(tokens):
38
+ token = token.rstrip("\n")
39
+ vocab[token] = index
40
+ return vocab
41
+
42
+
43
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
44
+ def whitespace_tokenize(text):
45
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
46
+ text = text.strip()
47
+ if not text:
48
+ return []
49
+ tokens = text.split()
50
+ return tokens
51
+
52
+
53
+ class RetriBertTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Constructs a RetriBERT tokenizer.
56
+
57
+ [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting
58
+ and wordpiece.
59
+
60
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
61
+ to this superclass for more information regarding those methods.
62
+
63
+ Args:
64
+ vocab_file (`str`):
65
+ File containing the vocabulary.
66
+ do_lower_case (`bool`, *optional*, defaults to `True`):
67
+ Whether or not to lowercase the input when tokenizing.
68
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
69
+ Whether or not to do basic tokenization before WordPiece.
70
+ never_split (`Iterable`, *optional*):
71
+ Collection of tokens which will never be split during tokenization. Only has an effect when
72
+ `do_basic_tokenize=True`
73
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
74
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
75
+ token instead.
76
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
77
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
78
+ sequence classification or for a text and a question for question answering. It is also used as the last
79
+ token of a sequence built with special tokens.
80
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
81
+ The token used for padding, for example when batching sequences of different lengths.
82
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
83
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
84
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
85
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
86
+ The token used for masking values. This is the token used when training this model with masked language
87
+ modeling. This is the token which the model will try to predict.
88
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
89
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
90
+ [issue](https://github.com/huggingface/transformers/issues/328)).
91
+ strip_accents (`bool`, *optional*):
92
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
93
+ value for `lowercase` (as in the original BERT).
94
+ """
95
+
96
+ vocab_files_names = VOCAB_FILES_NAMES
97
+ model_input_names = ["input_ids", "attention_mask"]
98
+
99
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__
100
+ def __init__(
101
+ self,
102
+ vocab_file,
103
+ do_lower_case=True,
104
+ do_basic_tokenize=True,
105
+ never_split=None,
106
+ unk_token="[UNK]",
107
+ sep_token="[SEP]",
108
+ pad_token="[PAD]",
109
+ cls_token="[CLS]",
110
+ mask_token="[MASK]",
111
+ tokenize_chinese_chars=True,
112
+ strip_accents=None,
113
+ **kwargs,
114
+ ):
115
+ if not os.path.isfile(vocab_file):
116
+ raise ValueError(
117
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
118
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
119
+ )
120
+ self.vocab = load_vocab(vocab_file)
121
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
122
+ self.do_basic_tokenize = do_basic_tokenize
123
+ if do_basic_tokenize:
124
+ self.basic_tokenizer = BasicTokenizer(
125
+ do_lower_case=do_lower_case,
126
+ never_split=never_split,
127
+ tokenize_chinese_chars=tokenize_chinese_chars,
128
+ strip_accents=strip_accents,
129
+ )
130
+
131
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
132
+
133
+ super().__init__(
134
+ do_lower_case=do_lower_case,
135
+ do_basic_tokenize=do_basic_tokenize,
136
+ never_split=never_split,
137
+ unk_token=unk_token,
138
+ sep_token=sep_token,
139
+ pad_token=pad_token,
140
+ cls_token=cls_token,
141
+ mask_token=mask_token,
142
+ tokenize_chinese_chars=tokenize_chinese_chars,
143
+ strip_accents=strip_accents,
144
+ **kwargs,
145
+ )
146
+
147
+ @property
148
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
149
+ def do_lower_case(self):
150
+ return self.basic_tokenizer.do_lower_case
151
+
152
+ @property
153
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
154
+ def vocab_size(self):
155
+ return len(self.vocab)
156
+
157
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
158
+ def get_vocab(self):
159
+ return dict(self.vocab, **self.added_tokens_encoder)
160
+
161
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
162
+ def _tokenize(self, text, split_special_tokens=False):
163
+ split_tokens = []
164
+ if self.do_basic_tokenize:
165
+ for token in self.basic_tokenizer.tokenize(
166
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
167
+ ):
168
+ # If the token is part of the never_split set
169
+ if token in self.basic_tokenizer.never_split:
170
+ split_tokens.append(token)
171
+ else:
172
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
173
+ else:
174
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
175
+ return split_tokens
176
+
177
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
178
+ def _convert_token_to_id(self, token):
179
+ """Converts a token (str) in an id using the vocab."""
180
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
181
+
182
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
183
+ def _convert_id_to_token(self, index):
184
+ """Converts an index (integer) in a token (str) using the vocab."""
185
+ return self.ids_to_tokens.get(index, self.unk_token)
186
+
187
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
188
+ def convert_tokens_to_string(self, tokens):
189
+ """Converts a sequence of tokens (string) in a single string."""
190
+ out_string = " ".join(tokens).replace(" ##", "").strip()
191
+ return out_string
192
+
193
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
194
+ def build_inputs_with_special_tokens(
195
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
196
+ ) -> List[int]:
197
+ """
198
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
199
+ adding special tokens. A BERT sequence has the following format:
200
+
201
+ - single sequence: `[CLS] X [SEP]`
202
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
203
+
204
+ Args:
205
+ token_ids_0 (`List[int]`):
206
+ List of IDs to which the special tokens will be added.
207
+ token_ids_1 (`List[int]`, *optional*):
208
+ Optional second list of IDs for sequence pairs.
209
+
210
+ Returns:
211
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
212
+ """
213
+ if token_ids_1 is None:
214
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
215
+ cls = [self.cls_token_id]
216
+ sep = [self.sep_token_id]
217
+ return cls + token_ids_0 + sep + token_ids_1 + sep
218
+
219
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
220
+ def get_special_tokens_mask(
221
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
222
+ ) -> List[int]:
223
+ """
224
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
225
+ special tokens using the tokenizer `prepare_for_model` method.
226
+
227
+ Args:
228
+ token_ids_0 (`List[int]`):
229
+ List of IDs.
230
+ token_ids_1 (`List[int]`, *optional*):
231
+ Optional second list of IDs for sequence pairs.
232
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
233
+ Whether or not the token list is already formatted with special tokens for the model.
234
+
235
+ Returns:
236
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
237
+ """
238
+
239
+ if already_has_special_tokens:
240
+ return super().get_special_tokens_mask(
241
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
242
+ )
243
+
244
+ if token_ids_1 is not None:
245
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
246
+ return [1] + ([0] * len(token_ids_0)) + [1]
247
+
248
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
249
+ def create_token_type_ids_from_sequences(
250
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
251
+ ) -> List[int]:
252
+ """
253
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
254
+ pair mask has the following format:
255
+
256
+ ```
257
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
258
+ | first sequence | second sequence |
259
+ ```
260
+
261
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
262
+
263
+ Args:
264
+ token_ids_0 (`List[int]`):
265
+ List of IDs.
266
+ token_ids_1 (`List[int]`, *optional*):
267
+ Optional second list of IDs for sequence pairs.
268
+
269
+ Returns:
270
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
271
+ """
272
+ sep = [self.sep_token_id]
273
+ cls = [self.cls_token_id]
274
+ if token_ids_1 is None:
275
+ return len(cls + token_ids_0 + sep) * [0]
276
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
277
+
278
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
279
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
280
+ index = 0
281
+ if os.path.isdir(save_directory):
282
+ vocab_file = os.path.join(
283
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
284
+ )
285
+ else:
286
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
287
+ with open(vocab_file, "w", encoding="utf-8") as writer:
288
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
289
+ if index != token_index:
290
+ logger.warning(
291
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
292
+ " Please check that the vocabulary is not corrupted!"
293
+ )
294
+ index = token_index
295
+ writer.write(token + "\n")
296
+ index += 1
297
+ return (vocab_file,)
298
+
299
+
300
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
301
+ class BasicTokenizer(object):
302
+ """
303
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
304
+
305
+ Args:
306
+ do_lower_case (`bool`, *optional*, defaults to `True`):
307
+ Whether or not to lowercase the input when tokenizing.
308
+ never_split (`Iterable`, *optional*):
309
+ Collection of tokens which will never be split during tokenization. Only has an effect when
310
+ `do_basic_tokenize=True`
311
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
312
+ Whether or not to tokenize Chinese characters.
313
+
314
+ This should likely be deactivated for Japanese (see this
315
+ [issue](https://github.com/huggingface/transformers/issues/328)).
316
+ strip_accents (`bool`, *optional*):
317
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
318
+ value for `lowercase` (as in the original BERT).
319
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
320
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
321
+ the full context of the words, such as contractions.
322
+ """
323
+
324
+ def __init__(
325
+ self,
326
+ do_lower_case=True,
327
+ never_split=None,
328
+ tokenize_chinese_chars=True,
329
+ strip_accents=None,
330
+ do_split_on_punc=True,
331
+ ):
332
+ if never_split is None:
333
+ never_split = []
334
+ self.do_lower_case = do_lower_case
335
+ self.never_split = set(never_split)
336
+ self.tokenize_chinese_chars = tokenize_chinese_chars
337
+ self.strip_accents = strip_accents
338
+ self.do_split_on_punc = do_split_on_punc
339
+
340
+ def tokenize(self, text, never_split=None):
341
+ """
342
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
343
+
344
+ Args:
345
+ never_split (`List[str]`, *optional*)
346
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
347
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
348
+ """
349
+ # union() returns a new set by concatenating the two sets.
350
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
351
+ text = self._clean_text(text)
352
+
353
+ # This was added on November 1st, 2018 for the multilingual and Chinese
354
+ # models. This is also applied to the English models now, but it doesn't
355
+ # matter since the English models were not trained on any Chinese data
356
+ # and generally don't have any Chinese data in them (there are Chinese
357
+ # characters in the vocabulary because Wikipedia does have some Chinese
358
+ # words in the English Wikipedia.).
359
+ if self.tokenize_chinese_chars:
360
+ text = self._tokenize_chinese_chars(text)
361
+ # prevents treating the same character with different unicode codepoints as different characters
362
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
363
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
364
+ split_tokens = []
365
+ for token in orig_tokens:
366
+ if token not in never_split:
367
+ if self.do_lower_case:
368
+ token = token.lower()
369
+ if self.strip_accents is not False:
370
+ token = self._run_strip_accents(token)
371
+ elif self.strip_accents:
372
+ token = self._run_strip_accents(token)
373
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
374
+
375
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
376
+ return output_tokens
377
+
378
+ def _run_strip_accents(self, text):
379
+ """Strips accents from a piece of text."""
380
+ text = unicodedata.normalize("NFD", text)
381
+ output = []
382
+ for char in text:
383
+ cat = unicodedata.category(char)
384
+ if cat == "Mn":
385
+ continue
386
+ output.append(char)
387
+ return "".join(output)
388
+
389
+ def _run_split_on_punc(self, text, never_split=None):
390
+ """Splits punctuation on a piece of text."""
391
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
392
+ return [text]
393
+ chars = list(text)
394
+ i = 0
395
+ start_new_word = True
396
+ output = []
397
+ while i < len(chars):
398
+ char = chars[i]
399
+ if _is_punctuation(char):
400
+ output.append([char])
401
+ start_new_word = True
402
+ else:
403
+ if start_new_word:
404
+ output.append([])
405
+ start_new_word = False
406
+ output[-1].append(char)
407
+ i += 1
408
+
409
+ return ["".join(x) for x in output]
410
+
411
+ def _tokenize_chinese_chars(self, text):
412
+ """Adds whitespace around any CJK character."""
413
+ output = []
414
+ for char in text:
415
+ cp = ord(char)
416
+ if self._is_chinese_char(cp):
417
+ output.append(" ")
418
+ output.append(char)
419
+ output.append(" ")
420
+ else:
421
+ output.append(char)
422
+ return "".join(output)
423
+
424
+ def _is_chinese_char(self, cp):
425
+ """Checks whether CP is the codepoint of a CJK character."""
426
+ # This defines a "chinese character" as anything in the CJK Unicode block:
427
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
428
+ #
429
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
430
+ # despite its name. The modern Korean Hangul alphabet is a different block,
431
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
432
+ # space-separated words, so they are not treated specially and handled
433
+ # like all of the other languages.
434
+ if (
435
+ (cp >= 0x4E00 and cp <= 0x9FFF)
436
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
437
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
438
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
439
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
440
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
441
+ or (cp >= 0xF900 and cp <= 0xFAFF)
442
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
443
+ ): #
444
+ return True
445
+
446
+ return False
447
+
448
+ def _clean_text(self, text):
449
+ """Performs invalid character removal and whitespace cleanup on text."""
450
+ output = []
451
+ for char in text:
452
+ cp = ord(char)
453
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
454
+ continue
455
+ if _is_whitespace(char):
456
+ output.append(" ")
457
+ else:
458
+ output.append(char)
459
+ return "".join(output)
460
+
461
+
462
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
463
+ class WordpieceTokenizer(object):
464
+ """Runs WordPiece tokenization."""
465
+
466
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
467
+ self.vocab = vocab
468
+ self.unk_token = unk_token
469
+ self.max_input_chars_per_word = max_input_chars_per_word
470
+
471
+ def tokenize(self, text):
472
+ """
473
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
474
+ tokenization using the given vocabulary.
475
+
476
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
477
+
478
+ Args:
479
+ text: A single token or whitespace separated tokens. This should have
480
+ already been passed through *BasicTokenizer*.
481
+
482
+ Returns:
483
+ A list of wordpiece tokens.
484
+ """
485
+
486
+ output_tokens = []
487
+ for token in whitespace_tokenize(text):
488
+ chars = list(token)
489
+ if len(chars) > self.max_input_chars_per_word:
490
+ output_tokens.append(self.unk_token)
491
+ continue
492
+
493
+ is_bad = False
494
+ start = 0
495
+ sub_tokens = []
496
+ while start < len(chars):
497
+ end = len(chars)
498
+ cur_substr = None
499
+ while start < end:
500
+ substr = "".join(chars[start:end])
501
+ if start > 0:
502
+ substr = "##" + substr
503
+ if substr in self.vocab:
504
+ cur_substr = substr
505
+ break
506
+ end -= 1
507
+ if cur_substr is None:
508
+ is_bad = True
509
+ break
510
+ sub_tokens.append(cur_substr)
511
+ start = end
512
+
513
+ if is_bad:
514
+ output_tokens.append(self.unk_token)
515
+ else:
516
+ output_tokens.extend(sub_tokens)
517
+ return output_tokens
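The `WordpieceTokenizer.tokenize` method above implements the greedy longest-match-first algorithm described in its docstring. A small sketch of that behaviour, assuming the deprecated module shown in this diff is importable from the installed `transformers` package and using a tiny made-up vocabulary:

```python
from transformers.models.deprecated.retribert.tokenization_retribert import WordpieceTokenizer

# Tiny illustrative vocabulary; real checkpoints ship a full vocab.txt.
vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wordpiece.tokenize("xyz"))        # ['[UNK]'] -- no vocabulary entry covers the word
```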
venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py ADDED
@@ -0,0 +1,180 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RetriBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ....tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ....utils import logging
24
+ from .tokenization_retribert import RetriBertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+
32
+ class RetriBertTokenizerFast(PreTrainedTokenizerFast):
33
+ r"""
34
+ Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).
35
+
36
+ [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
37
+ splitting and wordpiece.
38
+
39
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
40
+ refer to this superclass for more information regarding those methods.
41
+
42
+ Args:
43
+ vocab_file (`str`):
44
+ File containing the vocabulary.
45
+ do_lower_case (`bool`, *optional*, defaults to `True`):
46
+ Whether or not to lowercase the input when tokenizing.
47
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
48
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
49
+ token instead.
50
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
51
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
52
+ sequence classification or for a text and a question for question answering. It is also used as the last
53
+ token of a sequence built with special tokens.
54
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
55
+ The token used for padding, for example when batching sequences of different lengths.
56
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
57
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
58
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
59
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
60
+ The token used for masking values. This is the token used when training this model with masked language
61
+ modeling. This is the token which the model will try to predict.
62
+ clean_text (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
64
+ whitespaces by the classic one.
65
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
67
+ issue](https://github.com/huggingface/transformers/issues/328)).
68
+ strip_accents (`bool`, *optional*):
69
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
70
+ value for `lowercase` (as in the original BERT).
71
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
72
+ The prefix for subwords.
73
+ """
74
+
75
+ vocab_files_names = VOCAB_FILES_NAMES
76
+ slow_tokenizer_class = RetriBertTokenizer
77
+ model_input_names = ["input_ids", "attention_mask"]
78
+
79
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__
80
+ def __init__(
81
+ self,
82
+ vocab_file=None,
83
+ tokenizer_file=None,
84
+ do_lower_case=True,
85
+ unk_token="[UNK]",
86
+ sep_token="[SEP]",
87
+ pad_token="[PAD]",
88
+ cls_token="[CLS]",
89
+ mask_token="[MASK]",
90
+ tokenize_chinese_chars=True,
91
+ strip_accents=None,
92
+ **kwargs,
93
+ ):
94
+ super().__init__(
95
+ vocab_file,
96
+ tokenizer_file=tokenizer_file,
97
+ do_lower_case=do_lower_case,
98
+ unk_token=unk_token,
99
+ sep_token=sep_token,
100
+ pad_token=pad_token,
101
+ cls_token=cls_token,
102
+ mask_token=mask_token,
103
+ tokenize_chinese_chars=tokenize_chinese_chars,
104
+ strip_accents=strip_accents,
105
+ **kwargs,
106
+ )
107
+
108
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
109
+ if (
110
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
111
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
112
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
113
+ ):
114
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
115
+ normalizer_state["lowercase"] = do_lower_case
116
+ normalizer_state["strip_accents"] = strip_accents
117
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
118
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
119
+
120
+ self.do_lower_case = do_lower_case
121
+
122
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
123
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
124
+ """
125
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
126
+ adding special tokens. A BERT sequence has the following format:
127
+
128
+ - single sequence: `[CLS] X [SEP]`
129
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
130
+
131
+ Args:
132
+ token_ids_0 (`List[int]`):
133
+ List of IDs to which the special tokens will be added.
134
+ token_ids_1 (`List[int]`, *optional*):
135
+ Optional second list of IDs for sequence pairs.
136
+
137
+ Returns:
138
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
139
+ """
140
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
141
+
142
+ if token_ids_1 is not None:
143
+ output += token_ids_1 + [self.sep_token_id]
144
+
145
+ return output
146
+
147
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
148
+ def create_token_type_ids_from_sequences(
149
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
150
+ ) -> List[int]:
151
+ """
152
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
153
+ pair mask has the following format:
154
+
155
+ ```
156
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
157
+ | first sequence | second sequence |
158
+ ```
159
+
160
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
161
+
162
+ Args:
163
+ token_ids_0 (`List[int]`):
164
+ List of IDs.
165
+ token_ids_1 (`List[int]`, *optional*):
166
+ Optional second list of IDs for sequence pairs.
167
+
168
+ Returns:
169
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
170
+ """
171
+ sep = [self.sep_token_id]
172
+ cls = [self.cls_token_id]
173
+ if token_ids_1 is None:
174
+ return len(cls + token_ids_0 + sep) * [0]
175
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
176
+
177
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
178
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
179
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
180
+ return tuple(files)
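Both tokenizer variants build BERT-style pair inputs (`[CLS] A [SEP] B [SEP]`) with token type IDs of 0 for the first segment and 1 for the second. A quick sketch of the resulting layout, using made-up token IDs:

```python
# Hypothetical token ids for two short sequences A and B, plus illustrative special-token ids.
token_ids_0 = [7, 8, 9]     # sequence A
token_ids_1 = [11, 12]      # sequence B
cls_id, sep_id = 101, 102

input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (1 + len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 1)

print(input_ids)       # [101, 7, 8, 9, 102, 11, 12, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]
```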
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py ADDED
@@ -0,0 +1,97 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
22
+ "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
23
+ }
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_transfo_xl"] = [
32
+ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "AdaptiveEmbedding",
34
+ "TransfoXLForSequenceClassification",
35
+ "TransfoXLLMHeadModel",
36
+ "TransfoXLModel",
37
+ "TransfoXLPreTrainedModel",
38
+ "load_tf_weights_in_transfo_xl",
39
+ ]
40
+
41
+ try:
42
+ if not is_tf_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ _import_structure["modeling_tf_transfo_xl"] = [
48
+ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
49
+ "TFAdaptiveEmbedding",
50
+ "TFTransfoXLForSequenceClassification",
51
+ "TFTransfoXLLMHeadModel",
52
+ "TFTransfoXLMainLayer",
53
+ "TFTransfoXLModel",
54
+ "TFTransfoXLPreTrainedModel",
55
+ ]
56
+
57
+
58
+ if TYPE_CHECKING:
59
+ from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
60
+ from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
61
+
62
+ try:
63
+ if not is_torch_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .modeling_transfo_xl import (
69
+ TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
70
+ AdaptiveEmbedding,
71
+ TransfoXLForSequenceClassification,
72
+ TransfoXLLMHeadModel,
73
+ TransfoXLModel,
74
+ TransfoXLPreTrainedModel,
75
+ load_tf_weights_in_transfo_xl,
76
+ )
77
+
78
+ try:
79
+ if not is_tf_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .modeling_tf_transfo_xl import (
85
+ TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
86
+ TFAdaptiveEmbedding,
87
+ TFTransfoXLForSequenceClassification,
88
+ TFTransfoXLLMHeadModel,
89
+ TFTransfoXLMainLayer,
90
+ TFTransfoXLModel,
91
+ TFTransfoXLPreTrainedModel,
92
+ )
93
+
94
+ else:
95
+ import sys
96
+
97
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
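The `__init__.py` above only registers an `_import_structure` and installs a `_LazyModule`, so the heavy PyTorch/TensorFlow submodules are imported on first attribute access. A generic, simplified sketch of that lazy-import idea (this is not the actual `_LazyModule` implementation, just an illustration of the pattern):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Simplified stand-in for the lazy-module pattern: submodules load on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute name to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the submodule import only happens once
        return value
```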
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.62 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc ADDED
Binary file (6.81 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (3.11 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc ADDED
Binary file (34.4 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc ADDED
Binary file (4.15 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc ADDED
Binary file (40.2 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc ADDED
Binary file (6.09 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc ADDED
Binary file (25.1 kB).
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py ADDED
@@ -0,0 +1,189 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Transformer XL configuration"""
17
+
18
+ from ....configuration_utils import PretrainedConfig
19
+ from ....utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from .._archive_maps import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class TransfoXLConfig(PretrainedConfig):
29
+ """
30
+ This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It is
31
+ used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture.
32
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the TransfoXL
33
+ [transfo-xl/transfo-xl-wt103](https://huggingface.co/transfo-xl/transfo-xl-wt103) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 267735):
40
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`TransfoXLModel`] or [`TFTransfoXLModel`].
42
+ cutoffs (`List[int]`, *optional*, defaults to `[20000, 40000, 200000]`):
43
+ Cutoffs for the adaptive softmax.
44
+ d_model (`int`, *optional*, defaults to 1024):
45
+ Dimensionality of the model's hidden states.
46
+ d_embed (`int`, *optional*, defaults to 1024):
47
+ Dimensionality of the embeddings
48
+ n_head (`int`, *optional*, defaults to 16):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ d_head (`int`, *optional*, defaults to 64):
51
+ Dimensionality of the model's heads.
52
+ d_inner (`int`, *optional*, defaults to 4096):
53
+ Inner dimension in FF
54
+ div_val (`int`, *optional*, defaults to 4):
55
+ Divisor value for the adaptive input and softmax
56
+ pre_lnorm (`boolean`, *optional*, defaults to `False`):
57
+ Whether or not to apply LayerNorm to the input instead of the output in the blocks.
58
+ n_layer (`int`, *optional*, defaults to 18):
59
+ Number of hidden layers in the Transformer encoder.
60
+ mem_len (`int`, *optional*, defaults to 1600):
61
+ Length of the retained previous heads.
62
+ clamp_len (`int`, *optional*, defaults to 1000):
63
+ Use the same pos embeddings after clamp_len.
64
+ same_length (`boolean`, *optional*, defaults to `True`):
65
+ Whether or not to use the same attn length for all tokens
66
+ proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
67
+ True to share all but first projs, False not to share.
68
+ attn_type (`int`, *optional*, defaults to 0):
69
+ Attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
70
+ sample_softmax (`int`, *optional*, defaults to -1):
71
+ Number of samples in the sampled softmax.
72
+ adaptive (`boolean`, *optional*, defaults to `True`):
73
+ Whether or not to use adaptive softmax.
74
+ dropout (`float`, *optional*, defaults to 0.1):
75
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
76
+ dropatt (`float`, *optional*, defaults to 0.0):
77
+ The dropout ratio for the attention probabilities.
78
+ untie_r (`boolean`, *optional*, defaults to `True`):
79
+ Whether or not to untie relative position biases.
80
+ init (`str`, *optional*, defaults to `"normal"`):
81
+ Parameter initializer to use.
82
+ init_range (`float`, *optional*, defaults to 0.01):
83
+ Parameters initialized by U(-init_range, init_range).
84
+ proj_init_std (`float`, *optional*, defaults to 0.01):
85
+ Parameters initialized by N(0, init_std)
86
+ init_std (`float`, *optional*, defaults to 0.02):
87
+ Parameters initialized by N(0, init_std)
88
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
89
+ The epsilon to use in the layer normalization layers
90
+ eos_token_id (`int`, *optional*, defaults to 0):
91
+ End of stream token id.
92
+
93
+ Examples:
94
+
95
+ ```python
96
+ >>> from transformers import TransfoXLConfig, TransfoXLModel
97
+
98
+ >>> # Initializing a Transformer XL configuration
99
+ >>> configuration = TransfoXLConfig()
100
+
101
+ >>> # Initializing a model (with random weights) from the configuration
102
+ >>> model = TransfoXLModel(configuration)
103
+
104
+ >>> # Accessing the model configuration
105
+ >>> configuration = model.config
106
+ ```"""
107
+
108
+ model_type = "transfo-xl"
109
+ keys_to_ignore_at_inference = ["mems"]
110
+ attribute_map = {
111
+ "n_token": "vocab_size",
112
+ "hidden_size": "d_model",
113
+ "num_attention_heads": "n_head",
114
+ "num_hidden_layers": "n_layer",
115
+ }
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_size=267735,
120
+ cutoffs=[20000, 40000, 200000],
121
+ d_model=1024,
122
+ d_embed=1024,
123
+ n_head=16,
124
+ d_head=64,
125
+ d_inner=4096,
126
+ div_val=4,
127
+ pre_lnorm=False,
128
+ n_layer=18,
129
+ mem_len=1600,
130
+ clamp_len=1000,
131
+ same_length=True,
132
+ proj_share_all_but_first=True,
133
+ attn_type=0,
134
+ sample_softmax=-1,
135
+ adaptive=True,
136
+ dropout=0.1,
137
+ dropatt=0.0,
138
+ untie_r=True,
139
+ init="normal",
140
+ init_range=0.01,
141
+ proj_init_std=0.01,
142
+ init_std=0.02,
143
+ layer_norm_epsilon=1e-5,
144
+ eos_token_id=0,
145
+ **kwargs,
146
+ ):
147
+ self.vocab_size = vocab_size
148
+ self.cutoffs = []
149
+ self.cutoffs.extend(cutoffs)
150
+ if proj_share_all_but_first:
151
+ self.tie_projs = [False] + [True] * len(self.cutoffs)
152
+ else:
153
+ self.tie_projs = [False] + [False] * len(self.cutoffs)
154
+ self.d_model = d_model
155
+ self.d_embed = d_embed
156
+ self.d_head = d_head
157
+ self.d_inner = d_inner
158
+ self.div_val = div_val
159
+ self.pre_lnorm = pre_lnorm
160
+ self.n_layer = n_layer
161
+ self.n_head = n_head
162
+ self.mem_len = mem_len
163
+ self.same_length = same_length
164
+ self.attn_type = attn_type
165
+ self.clamp_len = clamp_len
166
+ self.sample_softmax = sample_softmax
167
+ self.adaptive = adaptive
168
+ self.dropout = dropout
169
+ self.dropatt = dropatt
170
+ self.untie_r = untie_r
171
+ self.init = init
172
+ self.init_range = init_range
173
+ self.proj_init_std = proj_init_std
174
+ self.init_std = init_std
175
+ self.layer_norm_epsilon = layer_norm_epsilon
176
+ super().__init__(eos_token_id=eos_token_id, **kwargs)
177
+
178
+ @property
179
+ def max_position_embeddings(self):
180
+ # Message copied from Transformer-XL documentation
181
+ logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
182
+ return -1
183
+
184
+ @max_position_embeddings.setter
185
+ def max_position_embeddings(self, value):
186
+ # Message copied from Transformer-XL documentation
187
+ raise NotImplementedError(
188
+ f"The model {self.model_type} is one of the few models that has no sequence length limit."
189
+ )
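As the constructor above shows, `proj_share_all_but_first=True` expands into a `tie_projs` list that ties every projection except the first, and `attribute_map` exposes the generic `hidden_size`/`num_hidden_layers` names as aliases. A quick hypothetical check (assuming `TransfoXLConfig` is still exported from the installed `transformers` package, as the conversion script below also assumes):

```python
from transformers import TransfoXLConfig

# Default cutoffs with proj_share_all_but_first=True tie all but the first projection.
config = TransfoXLConfig(cutoffs=[20000, 40000, 200000], proj_share_all_but_first=True)
print(config.tie_projs)                             # [False, True, True, True]

# attribute_map aliases the generic names onto the Transformer-XL-specific ones.
print(config.hidden_size == config.d_model)         # True
print(config.num_hidden_layers == config.n_layer)   # True
```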
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,121 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Transformer XL checkpoint and datasets."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+ import pickle
21
+ import sys
22
+
23
+ import torch
24
+
25
+ from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
26
+ from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils
27
+ from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
28
+ from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+
33
+ # We do this to be able to load python 2 datasets pickles
34
+ # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
35
+ data_utils.Vocab = data_utils.TransfoXLTokenizer
36
+ data_utils.Corpus = data_utils.TransfoXLCorpus
37
+ sys.modules["data_utils"] = data_utils
38
+ sys.modules["vocabulary"] = data_utils
39
+
40
+
41
+ def convert_transfo_xl_checkpoint_to_pytorch(
42
+ tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
43
+ ):
44
+ if transfo_xl_dataset_file:
45
+ # Convert a pre-processed corpus (see original TensorFlow repo)
46
+ with open(transfo_xl_dataset_file, "rb") as fp:
47
+ corpus = pickle.load(fp, encoding="latin1")
48
+ # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
49
+ pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
50
+ print(f"Save vocabulary to {pytorch_vocab_dump_path}")
51
+ corpus_vocab_dict = corpus.vocab.__dict__
52
+ torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
53
+
54
+ corpus_dict_no_vocab = corpus.__dict__
55
+ corpus_dict_no_vocab.pop("vocab", None)
56
+ pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
57
+ print(f"Save dataset to {pytorch_dataset_dump_path}")
58
+ torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
59
+
60
+ if tf_checkpoint_path:
61
+ # Convert a pre-trained TensorFlow model
62
+ config_path = os.path.abspath(transfo_xl_config_file)
63
+ tf_path = os.path.abspath(tf_checkpoint_path)
64
+
65
+ print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
66
+ # Initialise PyTorch model
67
+ if transfo_xl_config_file == "":
68
+ config = TransfoXLConfig()
69
+ else:
70
+ config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
71
+ print(f"Building PyTorch model from configuration: {config}")
72
+ model = TransfoXLLMHeadModel(config)
73
+
74
+ model = load_tf_weights_in_transfo_xl(model, config, tf_path)
75
+ # Save pytorch-model
76
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
77
+ pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
78
+ print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
79
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
80
+ print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
81
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
82
+ f.write(config.to_json_string())
83
+
84
+
85
+ if __name__ == "__main__":
86
+ parser = argparse.ArgumentParser()
87
+ parser.add_argument(
88
+ "--pytorch_dump_folder_path",
89
+ default=None,
90
+ type=str,
91
+ required=True,
92
+ help="Path to the folder to store the PyTorch model or dataset/vocab.",
93
+ )
94
+ parser.add_argument(
95
+ "--tf_checkpoint_path",
96
+ default="",
97
+ type=str,
98
+ help="An optional path to a TensorFlow checkpoint path to be converted.",
99
+ )
100
+ parser.add_argument(
101
+ "--transfo_xl_config_file",
102
+ default="",
103
+ type=str,
104
+ help=(
105
+ "An optional config json file corresponding to the pre-trained BERT model. \n"
106
+ "This specifies the model architecture."
107
+ ),
108
+ )
109
+ parser.add_argument(
110
+ "--transfo_xl_dataset_file",
111
+ default="",
112
+ type=str,
113
+ help="An optional dataset file to be converted in a vocabulary.",
114
+ )
115
+ args = parser.parse_args()
116
+ convert_transfo_xl_checkpoint_to_pytorch(
117
+ args.tf_checkpoint_path,
118
+ args.transfo_xl_config_file,
119
+ args.pytorch_dump_folder_path,
120
+ args.transfo_xl_dataset_file,
121
+ )
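Since the script exposes `convert_transfo_xl_checkpoint_to_pytorch` as a plain function, it can also be driven from Python rather than through `argparse`. A hypothetical invocation with placeholder paths (the dataset file is left empty to skip the corpus conversion branch, mirroring the optional CLI flags above):

```python
from transformers.models.deprecated.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
    convert_transfo_xl_checkpoint_to_pytorch,
)

# Placeholder paths -- substitute a real TF checkpoint, config JSON, and output directory.
convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/tf_checkpoint",
    transfo_xl_config_file="/path/to/transfo_xl_config.json",
    pytorch_dump_folder_path="/path/to/pytorch_dump_folder",
    transfo_xl_dataset_file="",  # empty string skips the corpus/vocabulary conversion
)
```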
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py ADDED
@@ -0,0 +1,1122 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ TF 2.0 Transformer XL model.
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ from dataclasses import dataclass
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ....modeling_tf_utils import (
29
+ TFModelInputType,
30
+ TFPreTrainedModel,
31
+ TFSequenceClassificationLoss,
32
+ get_initializer,
33
+ keras,
34
+ keras_serializable,
35
+ unpack_inputs,
36
+ )
37
+ from ....tf_utils import shape_list, stable_softmax
38
+ from ....utils import (
39
+ ModelOutput,
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ )
45
+ from .configuration_transfo_xl import TransfoXLConfig
46
+ from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
52
+ _CONFIG_FOR_DOC = "TransfoXLConfig"
53
+
54
+
55
+ from .._archive_maps import TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ class TFPositionalEmbedding(keras.layers.Layer):
59
+ def __init__(self, demb, **kwargs):
60
+ super().__init__(**kwargs)
61
+
62
+ self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))
63
+
64
+ def call(self, pos_seq, bsz=None):
65
+ self.inv_freq = tf.cast(self.inv_freq, dtype=pos_seq.dtype)
66
+ sinusoid_inp = tf.einsum("i,j->ij", pos_seq, self.inv_freq)
67
+ pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
68
+
69
+ if bsz is not None:
70
+ return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
71
+ else:
72
+ return pos_emb[:, None, :]
73
+
74
+
75
+ class TFPositionwiseFF(keras.layers.Layer):
76
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs):
77
+ super().__init__(**kwargs)
78
+
79
+ self.d_model = d_model
80
+ self.d_inner = d_inner
81
+ self.dropout = dropout
82
+
83
+ self.layer_1 = keras.layers.Dense(
84
+ d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name="CoreNet_._0"
85
+ )
86
+ self.drop_1 = keras.layers.Dropout(dropout)
87
+ self.layer_2 = keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name="CoreNet_._3")
88
+ self.drop_2 = keras.layers.Dropout(dropout)
89
+
90
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
91
+
92
+ self.pre_lnorm = pre_lnorm
93
+
94
+ def call(self, inp, training=False):
95
+ if self.pre_lnorm:
96
+ # layer normalization + positionwise feed-forward
97
+ core_out = self.layer_norm(inp)
98
+ core_out = self.layer_1(core_out)
99
+ core_out = self.drop_1(core_out, training=training)
100
+ core_out = self.layer_2(core_out)
101
+ core_out = self.drop_2(core_out, training=training)
102
+
103
+ # residual connection
104
+ output = core_out + inp
105
+ else:
106
+ # positionwise feed-forward
107
+ core_out = self.layer_1(inp)
108
+ core_out = self.drop_1(core_out, training=training)
109
+ core_out = self.layer_2(core_out)
110
+ core_out = self.drop_2(core_out, training=training)
111
+
112
+ # residual connection + layer normalization
113
+ output = self.layer_norm(inp + core_out)
114
+
115
+ return output
116
+
117
+
118
+ class TFRelPartialLearnableMultiHeadAttn(keras.layers.Layer):
119
+ def __init__(
120
+ self,
121
+ n_head,
122
+ d_model,
123
+ d_head,
124
+ dropout,
125
+ dropatt=0.0,
126
+ pre_lnorm=False,
127
+ r_r_bias=None,
128
+ r_w_bias=None,
129
+ layer_norm_epsilon=1e-5,
130
+ init_std=0.02,
131
+ output_attentions=False,
132
+ **kwargs,
133
+ ):
134
+ super().__init__(**kwargs)
135
+
136
+ self.n_head = n_head
137
+ self.d_model = d_model
138
+ self.d_head = d_head
139
+ self.dropout = dropout
140
+ self.output_attentions = output_attentions
141
+
142
+ self.qkv_net = keras.layers.Dense(
143
+ 3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="qkv_net"
144
+ )
145
+
146
+ self.drop = keras.layers.Dropout(dropout)
147
+ self.dropatt = keras.layers.Dropout(dropatt)
148
+ self.o_net = keras.layers.Dense(
149
+ d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name="o_net"
150
+ )
151
+
152
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
153
+
154
+ self.scale = 1 / (d_head**0.5)
155
+
156
+ self.pre_lnorm = pre_lnorm
157
+
158
+ if r_r_bias is not None and r_w_bias is not None: # Biases are shared
159
+ self.r_r_bias = r_r_bias
160
+ self.r_w_bias = r_w_bias
161
+ else:
162
+ self.r_r_bias = None
163
+ self.r_w_bias = None
164
+
165
+ self.r_net = keras.layers.Dense(
166
+ self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="r_net"
167
+ )
168
+
169
+ def build(self, input_shape):
170
+ if self.r_r_bias is None or self.r_w_bias is None: # Biases are not shared
171
+ self.r_r_bias = self.add_weight(
172
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
173
+ )
174
+ self.r_w_bias = self.add_weight(
175
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
176
+ )
177
+ super().build(input_shape)
178
+
179
+ def _rel_shift(self, x):
180
+ x_size = shape_list(x)
181
+
182
+ x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
183
+ x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])
184
+ x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
185
+ x = tf.reshape(x, x_size)
186
+
187
+ return x
188
+
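+ # Note on _rel_shift above: it implements the Transformer-XL "relative shift" trick.
+ # Padding a zero column along the key axis, reshaping so the first two axes swap,
+ # dropping the first row and reshaping back shifts each row of the [qlen, klen, ...]
+ # score tensor so that entry (i, j) ends up holding the positional term for the relative
+ # distance between query i and key j, instead of being indexed by position inside the
+ # `r` (relative embedding) sequence.
+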
189
+ def call(self, w, r, attn_mask, mems, head_mask, output_attentions, training=False):
190
+ qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]
191
+
192
+ if mems is not None:
193
+ mems = tf.cast(mems, dtype=w.dtype)
194
+ cat = tf.concat([mems, w], 0)
195
+ if self.pre_lnorm:
196
+ w_heads = self.qkv_net(self.layer_norm(cat))
197
+ else:
198
+ w_heads = self.qkv_net(cat)
199
+ r_head_k = self.r_net(r)
200
+
201
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
202
+ w_head_q = w_head_q[-qlen:]
203
+ else:
204
+ if self.pre_lnorm:
205
+ w_heads = self.qkv_net(self.layer_norm(w))
206
+ else:
207
+ w_heads = self.qkv_net(w)
208
+ r_head_k = self.r_net(r)
209
+
210
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
211
+
212
+ klen = shape_list(w_head_k)[0]
213
+
214
+ w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head
215
+ w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head
216
+ w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head
217
+
218
+ r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head)) # qlen x n_head x d_head
219
+
220
+ # compute attention score
221
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
222
+ AC = tf.einsum("ibnd,jbnd->ijbn", rw_head_q, w_head_k) # qlen x klen x bsz x n_head
223
+
224
+ rr_head_q = w_head_q + self.r_r_bias
225
+ BD = tf.einsum("ibnd,jnd->ijbn", rr_head_q, r_head_k) # qlen x klen x bsz x n_head
226
+ BD = self._rel_shift(BD)
227
+
228
+ # [qlen x klen x bsz x n_head]
229
+ attn_score = AC + BD
230
+ attn_score = attn_score * self.scale
231
+
232
+ # compute attention probability
233
+ if attn_mask is not None:
234
+ attn_mask_t = attn_mask[:, :, None, None]
235
+ attn_mask_t = tf.cast(attn_mask_t, dtype=attn_score.dtype)
236
+ attn_score = attn_score * (1.0 - attn_mask_t) - 1e30 * attn_mask_t
237
+
238
+ # [qlen x klen x bsz x n_head]
239
+ attn_prob = stable_softmax(attn_score, axis=1)
240
+ attn_prob = self.dropatt(attn_prob, training=training)
241
+
242
+ # Mask heads if we want to
243
+ if head_mask is not None:
244
+ attn_prob = attn_prob * head_mask
245
+
246
+ # compute attention vector
247
+ attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, w_head_v)
248
+
249
+ # [qlen x bsz x n_head x d_head]
250
+ attn_vec_sizes = shape_list(attn_vec)
251
+ attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head))
252
+
253
+ # linear projection
254
+ attn_out = self.o_net(attn_vec)
255
+ attn_out = self.drop(attn_out, training=training)
256
+
257
+ if self.pre_lnorm:
258
+ # residual connection
259
+ outputs = [w + attn_out]
260
+ else:
261
+ # residual connection + layer normalization
262
+ outputs = [self.layer_norm(w + attn_out)]
263
+
264
+ if output_attentions:
265
+ outputs.append(attn_prob)
266
+
267
+ return outputs
268
+
269
+
270
+ class TFRelPartialLearnableDecoderLayer(keras.layers.Layer):
271
+ def __init__(
272
+ self,
273
+ n_head,
274
+ d_model,
275
+ d_head,
276
+ d_inner,
277
+ dropout,
278
+ dropatt=0.0,
279
+ pre_lnorm=False,
280
+ r_w_bias=None,
281
+ r_r_bias=None,
282
+ layer_norm_epsilon=1e-5,
283
+ init_std=0.02,
284
+ output_attentions=False,
285
+ **kwargs,
286
+ ):
287
+ super().__init__(**kwargs)
288
+
289
+ self.dec_attn = TFRelPartialLearnableMultiHeadAttn(
290
+ n_head,
291
+ d_model,
292
+ d_head,
293
+ dropout,
294
+ dropatt=dropatt,
295
+ pre_lnorm=pre_lnorm,
296
+ r_w_bias=r_w_bias,
297
+ r_r_bias=r_r_bias,
298
+ init_std=init_std,
299
+ layer_norm_epsilon=layer_norm_epsilon,
300
+ output_attentions=output_attentions,
301
+ name="dec_attn",
302
+ )
303
+ self.pos_ff = TFPositionwiseFF(
304
+ d_model,
305
+ d_inner,
306
+ dropout,
307
+ pre_lnorm=pre_lnorm,
308
+ init_std=init_std,
309
+ layer_norm_epsilon=layer_norm_epsilon,
310
+ name="pos_ff",
311
+ )
312
+
313
+ def call(self, dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=False):
314
+ attn_outputs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=training)
315
+ ff_output = self.pos_ff(attn_outputs[0], training=training)
316
+
317
+ outputs = [ff_output] + attn_outputs[1:]
318
+
319
+ return outputs
320
+
321
+
322
+ class TFTransfoEmbeddings(keras.layers.Layer):
323
+ def __init__(self, vocab_size, emb_size, init_std, **kwargs):
324
+ super().__init__(**kwargs)
325
+
326
+ self.vocab_size = vocab_size
327
+ self.emb_size = emb_size
328
+ self.init_std = init_std
329
+
330
+ def build(self, input_shape):
331
+ self.weight = self.add_weight(
332
+ shape=(self.vocab_size, self.emb_size),
333
+ initializer=get_initializer(self.init_std),
334
+ name="embeddings",
335
+ )
336
+
337
+ super().build(input_shape)
338
+
339
+ def call(self, inputs):
340
+ return tf.gather(self.weight, inputs)
341
+
342
+
343
+ class TFAdaptiveEmbedding(keras.layers.Layer):
344
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs):
345
+ super().__init__(**kwargs)
346
+
347
+ self.n_token = n_token
348
+ self.d_embed = d_embed
349
+ self.init_std = init_std
350
+
351
+ self.cutoffs = cutoffs + [n_token]
352
+ self.div_val = div_val
353
+ self.d_proj = d_proj
354
+
355
+ self.emb_scale = d_proj**0.5
356
+
357
+ self.cutoff_ends = [0] + self.cutoffs
358
+
359
+ self.emb_layers = []
360
+ self.emb_projs = []
361
+
362
+ if div_val == 1:
363
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
364
+ else:
365
+ for i in range(len(self.cutoffs)):
366
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
367
+ d_emb_i = d_embed // (div_val**i)
368
+ self.emb_layers.append(
369
+ TFTransfoEmbeddings(
370
+ r_idx - l_idx,
371
+ d_emb_i,
372
+ init_std,
373
+ name=f"emb_layers_._{i}",
374
+ )
375
+ )
376
+
377
+ def build(self, input_shape):
378
+ for i in range(len(self.cutoffs)):
379
+ d_emb_i = self.d_embed // (self.div_val**i)
380
+ self.emb_projs.append(
381
+ self.add_weight(
382
+ shape=(d_emb_i, self.d_proj),
383
+ initializer=get_initializer(self.init_std),
384
+ trainable=True,
385
+ name=f"emb_projs_._{i}",
386
+ )
387
+ )
388
+
389
+ super().build(input_shape)
390
+
391
+ def call(self, inp):
392
+ if self.div_val == 1:
393
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
394
+ else:
395
+ inp_flat = tf.reshape(inp, (-1,))
396
+ emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj])
397
+ for i in range(len(self.cutoffs)):
398
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
399
+
400
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
401
+
402
+ inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx
403
+ emb_i = self.emb_layers[i](inp_i)
404
+ emb_i = tf.einsum("id,de->ie", emb_i, self.emb_projs[i])
405
+
406
+ mask_idx = tf.where(mask_i)
407
+ scatter = tf.scatter_nd(mask_idx, emb_i, shape_list(emb_flat))
408
+ emb_flat = tf.cast(emb_flat, dtype=scatter.dtype)
409
+ emb_flat += scatter
410
+
411
+ embed_shape = shape_list(inp) + [self.d_proj]
412
+ embed = tf.reshape(emb_flat, embed_shape)
413
+
414
+ embed *= self.emb_scale
415
+
416
+ return embed
417
+
418
+
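+ # Orientation sketch for the adaptive embedding above: token ids are partitioned into
+ # clusters by `cutoffs`; cluster i covers ids in [cutoff_ends[i], cutoff_ends[i + 1]) and
+ # owns an embedding table of width d_embed // (div_val ** i). Each cluster's embeddings
+ # are projected to d_proj with emb_projs[i], scattered back into a buffer aligned with the
+ # flattened input ids, and finally scaled by d_proj ** 0.5, so rare tokens get narrower,
+ # cheaper tables.
+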
419
+ @keras_serializable
420
+ class TFTransfoXLMainLayer(keras.layers.Layer):
421
+ config_class = TransfoXLConfig
422
+
423
+ def __init__(self, config, **kwargs):
424
+ super().__init__(**kwargs)
425
+
426
+ self.config = config
427
+ self.output_hidden_states = config.output_hidden_states
428
+ self.output_attentions = config.output_attentions
429
+ self.return_dict = config.use_return_dict
430
+
431
+ self.n_token = config.vocab_size
432
+
433
+ self.d_embed = config.d_embed
434
+ self.d_model = config.d_model
435
+ self.n_head = config.n_head
436
+ self.d_head = config.d_head
437
+ self.untie_r = config.untie_r
438
+
439
+ self.word_emb = TFAdaptiveEmbedding(
440
+ config.vocab_size,
441
+ config.d_embed,
442
+ config.d_model,
443
+ config.cutoffs,
444
+ div_val=config.div_val,
445
+ init_std=config.init_std,
446
+ name="word_emb",
447
+ )
448
+
449
+ self.drop = keras.layers.Dropout(config.dropout)
450
+
451
+ self.n_layer = config.n_layer
452
+ self.mem_len = config.mem_len
453
+ self.attn_type = config.attn_type
454
+
455
+ self.layers = []
456
+ if config.attn_type == 0: # the default attention
457
+ for i in range(config.n_layer):
458
+ self.layers.append(
459
+ TFRelPartialLearnableDecoderLayer(
460
+ config.n_head,
461
+ config.d_model,
462
+ config.d_head,
463
+ config.d_inner,
464
+ config.dropout,
465
+ dropatt=config.dropatt,
466
+ pre_lnorm=config.pre_lnorm,
467
+ r_w_bias=None if self.untie_r else self.r_w_bias,
468
+ r_r_bias=None if self.untie_r else self.r_r_bias,
469
+ layer_norm_epsilon=config.layer_norm_epsilon,
470
+ init_std=config.init_std,
471
+ output_attentions=self.output_attentions,
472
+ name=f"layers_._{i}",
473
+ )
474
+ )
475
+ else: # learnable embeddings and absolute embeddings
476
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
477
+
478
+ self.same_length = config.same_length
479
+ self.clamp_len = config.clamp_len
480
+
481
+ if self.attn_type == 0: # default attention
482
+ self.pos_emb = TFPositionalEmbedding(self.d_model, name="pos_emb")
483
+ else: # learnable embeddings and absolute embeddings
484
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
485
+
486
+ def build(self, input_shape):
487
+ if not self.untie_r:
488
+ self.r_w_bias = self.add_weight(
489
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
490
+ )
491
+ self.r_r_bias = self.add_weight(
492
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
493
+ )
494
+ super().build(input_shape)
495
+
496
+ def get_input_embeddings(self):
497
+ return self.word_emb
498
+
499
+ def set_input_embeddings(self, value):
500
+ raise NotImplementedError
501
+
502
+ def backward_compatible(self):
503
+ self.sample_softmax = -1
504
+
505
+ def reset_memory_length(self, mem_len):
506
+ self.mem_len = mem_len
507
+
508
+ def _prune_heads(self, heads):
509
+ raise NotImplementedError
510
+
511
+ def init_mems(self, bsz):
512
+ if self.mem_len > 0:
513
+ mems = []
514
+ for i in range(self.n_layer):
515
+ empty = tf.zeros([self.mem_len, bsz, self.d_model])
516
+ mems.append(empty)
517
+
518
+ return mems
519
+ else:
520
+ return None
521
+
522
+ def _update_mems(self, hids, mems, mlen, qlen):
523
+ # does not deal with None
524
+ if mems is None:
525
+ return None
526
+
527
+ # mems is not None
528
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
529
+
530
+ # There are `mlen + qlen` steps that can be cached into mems
531
+ new_mems = []
532
+ end_idx = mlen + tf.math.maximum(0, qlen)
533
+ beg_idx = tf.math.maximum(0, end_idx - tf.convert_to_tensor(self.mem_len))
534
+ for i in range(len(hids)):
535
+ mems[i] = tf.cast(mems[i], dtype=hids[i].dtype)
536
+ cat = tf.concat([mems[i], hids[i]], axis=0)
537
+ tf.stop_gradient(cat)
538
+ new_mems.append(cat[beg_idx:end_idx])
539
+
540
+ return new_mems
541
+
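+ # Memory update sketch: for every layer, `_update_mems` concatenates the cached states
+ # with the hidden states just computed and keeps only the most recent `mem_len` steps
+ # (cat[beg_idx:end_idx] with end_idx = mlen + qlen), so the returned `new_mems` always
+ # hold a rolling window of past activations.
+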
542
+ @unpack_inputs
543
+ def call(
544
+ self,
545
+ input_ids: TFModelInputType | None = None,
546
+ mems: List[tf.Tensor] | None = None,
547
+ head_mask: np.ndarray | tf.Tensor | None = None,
548
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
549
+ output_attentions: Optional[bool] = None,
550
+ output_hidden_states: Optional[bool] = None,
551
+ return_dict: Optional[bool] = None,
552
+ labels: np.ndarray | tf.Tensor | None = None,
553
+ training: bool = False,
554
+ ):
555
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
556
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
557
+ if input_ids is not None and inputs_embeds is not None:
558
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
559
+ elif input_ids is not None:
560
+ input_ids = tf.transpose(input_ids, perm=(1, 0))
561
+ qlen, bsz = shape_list(input_ids)
562
+ elif inputs_embeds is not None:
563
+ inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
564
+ qlen, bsz = shape_list(inputs_embeds)[:2]
565
+ else:
566
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
567
+
568
+ if mems is None:
569
+ mems = self.init_mems(bsz)
570
+
571
+ # Prepare head mask if needed
572
+ # 1.0 in head_mask indicate we keep the head
573
+ # attention_probs has shape bsz x n_heads x N x N
574
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
575
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
576
+ if head_mask is not None:
577
+ raise NotImplementedError
578
+ else:
579
+ head_mask = [None] * self.n_layer
580
+
581
+ if inputs_embeds is not None:
582
+ word_emb = inputs_embeds
583
+ else:
584
+ word_emb = self.word_emb(input_ids)
585
+
586
+ mlen = shape_list(mems[0])[0] if mems is not None else 0
587
+ klen = mlen + qlen
588
+
589
+ # Compute decoder attention mask
590
+ all_ones = tf.ones([qlen, klen], dtype=tf.int32)
591
+ upper_mask = 1 - tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, mlen)
592
+ if self.same_length:
593
+ mask_len = klen - self.mem_len
594
+ mask_shift_len = qlen - tf.nn.relu(mask_len) # Lazy clamping of negatives to zero
595
+
596
+ # Use an indicator variable instead of a conditional to keep the compiler happy
597
+ lower_mask = tf.linalg.band_part(all_ones, -1, 0) - (
598
+ tf.linalg.band_part(all_ones, mask_shift_len - 1, 0) * tf.cast(mask_shift_len != 0, tf.int32)
599
+ )
600
+ dec_attn_mask = upper_mask + lower_mask
601
+ else:
602
+ dec_attn_mask = upper_mask
603
+
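+ # Mask semantics (for reference): dec_attn_mask[i, j] == 1 marks key position j as masked
+ # for query i. With same_length=False only future positions are masked (j > i + mlen);
+ # with same_length=True an extra lower-triangular term also hides positions too far in
+ # the past, so every query attends over the same number of tokens.
+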
604
+ hids = []
605
+ attentions = [] if output_attentions else None
606
+ if self.attn_type == 0: # default
607
+ pos_seq = tf.range(klen - 1, -1, -1.0)
608
+ if self.clamp_len > 0:
609
+ pos_seq = tf.minimum(pos_seq, self.clamp_len)
610
+ pos_emb = self.pos_emb(pos_seq)
611
+
612
+ core_out = self.drop(word_emb, training=training)
613
+ pos_emb = self.drop(pos_emb, training=training)
614
+
615
+ for i, layer in enumerate(self.layers):
616
+ hids.append(core_out)
617
+ mems_i = None if mems is None else mems[i]
618
+ layer_outputs = layer(
619
+ core_out,
620
+ pos_emb,
621
+ dec_attn_mask,
622
+ mems_i,
623
+ head_mask[i],
624
+ output_attentions,
625
+ training=training,
626
+ )
627
+ core_out = layer_outputs[0]
628
+ if output_attentions:
629
+ attentions.append(layer_outputs[1])
630
+ else: # learnable embeddings and absolute embeddings
631
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
632
+
633
+ core_out = self.drop(core_out, training=training)
634
+
635
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
636
+
637
+ # We transpose back here to shape [bsz, len, hidden_dim]
638
+ core_out = tf.transpose(core_out, perm=(1, 0, 2))
639
+
640
+ if output_hidden_states:
641
+ # Transpose to library standard shape [bsz, len, hidden_dim] and add last layer
642
+ hids = tuple(tf.transpose(t, perm=(1, 0, 2)) for t in hids)
643
+ hids = hids + (core_out,)
644
+ else:
645
+ hids = None
646
+ if output_attentions:
647
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
648
+ attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
649
+
650
+ if not return_dict:
651
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
652
+
653
+ return TFTransfoXLModelOutput(
654
+ last_hidden_state=core_out,
655
+ mems=new_mems,
656
+ hidden_states=hids,
657
+ attentions=attentions,
658
+ )
659
+
660
+
661
+ class TFTransfoXLPreTrainedModel(TFPreTrainedModel):
662
+ """
663
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
664
+ models.
665
+ """
666
+
667
+ config_class = TransfoXLConfig
668
+ base_model_prefix = "transformer"
669
+
670
+
671
+ @dataclass
672
+ class TFTransfoXLModelOutput(ModelOutput):
673
+ """
674
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
675
+
676
+ Args:
677
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
678
+ Sequence of hidden-states at the output of the last layer of the model.
679
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
680
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
681
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
682
+ be passed as input ids as they have already been computed.
683
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
684
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
685
+ `(batch_size, sequence_length, hidden_size)`.
686
+
687
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
688
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
689
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
690
+ sequence_length)`.
691
+
692
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
693
+ heads.
694
+ """
695
+
696
+ last_hidden_state: tf.Tensor = None
697
+ mems: List[tf.Tensor] = None
698
+ hidden_states: Tuple[tf.Tensor] | None = None
699
+ attentions: Tuple[tf.Tensor] | None = None
700
+
701
+
702
+ @dataclass
703
+ class TFTransfoXLLMHeadModelOutput(ModelOutput):
704
+ """
705
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
706
+
707
+ Args:
708
+ losses (`tf.Tensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
709
+ Language modeling losses (not reduced).
710
+ prediction_scores (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
711
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
712
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
713
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
714
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
715
+ be passed as input ids as they have already been computed.
716
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
717
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
718
+ `(batch_size, sequence_length, hidden_size)`.
719
+
720
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
721
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
722
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
723
+ sequence_length)`.
724
+
725
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
726
+ heads.
727
+ """
728
+
729
+ prediction_scores: tf.Tensor = None
730
+ mems: List[tf.Tensor] = None
731
+ hidden_states: Tuple[tf.Tensor] | None = None
732
+ attentions: Tuple[tf.Tensor] | None = None
733
+
734
+
735
+ @dataclass
736
+ class TFTransfoXLSequenceClassifierOutputWithPast(ModelOutput):
737
+ """
738
+ Base class for outputs of sentence classification models.
739
+
740
+ Args:
741
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
742
+ Classification (or regression if config.num_labels==1) loss.
743
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
744
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
745
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
746
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
747
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
748
+ be passed as input ids as they have already been computed.
749
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
750
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
751
+ `(batch_size, sequence_length, hidden_size)`.
752
+
753
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
754
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
755
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
756
+ sequence_length)`.
757
+
758
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
759
+ heads.
760
+ """
761
+
762
+ loss: tf.Tensor | None = None
763
+ logits: tf.Tensor = None
764
+ mems: List[tf.Tensor] = None
765
+ hidden_states: Tuple[tf.Tensor] | None = None
766
+ attentions: Tuple[tf.Tensor] | None = None
767
+
768
+
769
+ TRANSFO_XL_START_DOCSTRING = r"""
770
+
771
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
772
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
773
+ etc.)
774
+
775
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
776
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
777
+ behavior.
778
+
779
+ <Tip>
780
+
781
+ TensorFlow models and layers in `transformers` accept two formats as input:
782
+
783
+ - having all inputs as keyword arguments (like PyTorch models), or
784
+ - having all inputs as a list, tuple or dict in the first positional argument.
785
+
786
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
787
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
788
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
789
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
790
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
791
+ positional argument:
792
+
793
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
794
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
795
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
796
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
797
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
798
+
799
+ Note that when creating models and layers with
800
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
801
+ about any of this, as you can just pass inputs like you would to any other Python function!
802
+
803
+ </Tip>
804
+
805
+ Parameters:
806
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
807
+ Initializing with a config file does not load the weights associated with the model, only the
808
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
809
+ """
810
+
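+ # Illustrative only: the three input formats described in the docstring above, assuming
+ # `model` is a built TF Transformer-XL model and `input_ids` is a tf.Tensor of shape
+ # (batch_size, sequence_length):
+ #
+ #     outputs = model(input_ids)                      # single tensor
+ #     outputs = model([input_ids])                    # list, in docstring order
+ #     outputs = model({"input_ids": input_ids})       # dict keyed by input name
+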
811
+ TRANSFO_XL_INPUTS_DOCSTRING = r"""
812
+ Args:
813
+ input_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`):
814
+ Indices of input sequence tokens in the vocabulary.
815
+
816
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
817
+ [`PreTrainedTokenizer.encode`] for details.
818
+
819
+ [What are input IDs?](../glossary#input-ids)
820
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
821
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
822
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
823
+ given to this model should not be passed as `input_ids` as they have already been computed.
824
+ head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
825
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
826
+
827
+ - 1 indicates the head is **not masked**,
828
+ - 0 indicates the head is **masked**.
829
+ inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
830
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
831
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
832
+ model's internal embedding lookup matrix.
833
+ output_attentions (`bool`, *optional*):
834
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
835
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
836
+ config will be used instead.
837
+ output_hidden_states (`bool`, *optional*):
838
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
839
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
840
+ used instead.
841
+ return_dict (`bool`, *optional*):
842
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
843
+ eager mode, in graph mode the value will always be set to True.
844
+ training (`bool`, *optional*, defaults to `False`):
845
+ Whether or not to use the model in training mode (some modules like dropout modules have different
846
+ behaviors between training and evaluation).
847
+ """
848
+
849
+
850
+ @add_start_docstrings(
851
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
852
+ TRANSFO_XL_START_DOCSTRING,
853
+ )
854
+ class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
855
+ def __init__(self, config, *inputs, **kwargs):
856
+ super().__init__(config, *inputs, **kwargs)
857
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
858
+
859
+ @unpack_inputs
860
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
861
+ @add_code_sample_docstrings(
862
+ checkpoint=_CHECKPOINT_FOR_DOC,
863
+ output_type=TFTransfoXLModelOutput,
864
+ config_class=_CONFIG_FOR_DOC,
865
+ )
866
+ def call(
867
+ self,
868
+ input_ids: TFModelInputType | None = None,
869
+ mems: List[tf.Tensor] | None = None,
870
+ head_mask: np.ndarray | tf.Tensor | None = None,
871
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
872
+ output_attentions: bool | None = None,
873
+ output_hidden_states: bool | None = None,
874
+ return_dict: bool | None = None,
875
+ training: bool = False,
876
+ ) -> TFTransfoXLModelOutput | Tuple[tf.Tensor]:
877
+ outputs = self.transformer(
878
+ input_ids=input_ids,
879
+ mems=mems,
880
+ head_mask=head_mask,
881
+ inputs_embeds=inputs_embeds,
882
+ output_attentions=output_attentions,
883
+ output_hidden_states=output_hidden_states,
884
+ return_dict=return_dict,
885
+ training=training,
886
+ )
887
+
888
+ return outputs
889
+
890
+
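+ # Minimal usage sketch (mirroring the generic example generated by
+ # `add_code_sample_docstrings`; checkpoint name taken from _CHECKPOINT_FOR_DOC):
+ #
+ #     from transformers import AutoTokenizer, TFTransfoXLModel
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
+ #     model = TFTransfoXLModel.from_pretrained("transfo-xl/transfo-xl-wt103")
+ #     inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+ #     outputs = model(inputs)
+ #     last_hidden_states = outputs.last_hidden_state
+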
891
+ @add_start_docstrings(
892
+ """
893
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
894
+ input embeddings)
895
+ """,
896
+ TRANSFO_XL_START_DOCSTRING,
897
+ )
898
+ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
899
+ def __init__(self, config):
900
+ super().__init__(config)
901
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
902
+ self.sample_softmax = config.sample_softmax
903
+ assert self.sample_softmax <= 0, (
904
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
905
+ " https://github.com/huggingface/transformers/issues/3310"
906
+ )
907
+
908
+ self.crit = TFAdaptiveSoftmaxMask(
909
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name="crit"
910
+ )
911
+
912
+ def _resize_token_embeddings(self, new_num_tokens):
913
+ raise NotImplementedError()
914
+
915
+ def get_output_embeddings(self):
916
+ """Double-check if you are using adaptive softmax."""
917
+ if len(self.crit.out_layers) > 0:
918
+ return self.crit.out_layers[-1]
919
+ return None
920
+
921
+ def reset_memory_length(self, mem_len):
922
+ self.transformer.reset_memory_length(mem_len)
923
+
924
+ def init_mems(self, bsz):
925
+ return self.transformer.init_mems(bsz)
926
+
927
+ @unpack_inputs
928
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
929
+ @add_code_sample_docstrings(
930
+ checkpoint=_CHECKPOINT_FOR_DOC,
931
+ output_type=TFTransfoXLLMHeadModelOutput,
932
+ config_class=_CONFIG_FOR_DOC,
933
+ )
934
+ def call(
935
+ self,
936
+ input_ids: TFModelInputType | None = None,
937
+ mems: List[tf.Tensor] | None = None,
938
+ head_mask: np.ndarray | tf.Tensor | None = None,
939
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
940
+ output_attentions: bool | None = None,
941
+ output_hidden_states: bool | None = None,
942
+ return_dict: bool | None = None,
943
+ labels: np.ndarray | tf.Tensor | None = None,
944
+ training: bool = False,
945
+ ) -> TFTransfoXLLMHeadModelOutput | Tuple[tf.Tensor]:
946
+ if input_ids is not None:
947
+ bsz, tgt_len = shape_list(input_ids)[:2]
948
+ else:
949
+ bsz, tgt_len = shape_list(inputs_embeds)[:2]
950
+
951
+ transformer_outputs = self.transformer(
952
+ input_ids,
953
+ mems,
954
+ head_mask,
955
+ inputs_embeds,
956
+ output_attentions,
957
+ output_hidden_states,
958
+ return_dict,
959
+ training=training,
960
+ )
961
+
962
+ last_hidden = transformer_outputs[0]
963
+ pred_hid = last_hidden[:, -tgt_len:]
964
+
965
+ softmax_output = self.crit(pred_hid, labels, training=training)
966
+ prediction_scores = softmax_output if labels is None else ()
967
+
968
+ if not return_dict:
969
+ return (prediction_scores,) + transformer_outputs[1:]
970
+
971
+ return TFTransfoXLLMHeadModelOutput(
972
+ prediction_scores=prediction_scores,
973
+ mems=transformer_outputs.mems,
974
+ hidden_states=transformer_outputs.hidden_states,
975
+ attentions=transformer_outputs.attentions,
976
+ )
977
+
978
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
979
+ inputs = {}
980
+
981
+ # if past is defined in model kwargs then use it for faster decoding
982
+ if past_key_values:
983
+ input_ids = tf.expand_dims(input_ids[:, -1], axis=-1)
984
+ else:
985
+ input_ids = input_ids
986
+
987
+ return inputs
988
+
989
+ # Adapted from the torch tie_weights function
990
+ def tf_to_pt_weight_rename(self, tf_weight):
991
+ if self.config.tie_word_embeddings and "crit.out_layers" in tf_weight:
992
+ return tf_weight, tf_weight.replace("crit.out_layers", "transformer.word_emb.emb_layers")
993
+ elif self.config.tie_projs and "crit.out_projs" in tf_weight:
994
+ for i, tie_proj in enumerate(self.config.tie_projs):
995
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
996
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
997
+ return tf_weight, tf_weight.replace(f"crit.out_projs.{i}", "transformer.word_emb.emb_projs.0")
998
+ elif tie_proj and self.config.div_val != 1:
999
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
1000
+ return tf_weight, tf_weight.replace("crit.out_projs", "transformer.word_emb.emb_projs")
1001
+ else:
1002
+ return (tf_weight,)
1003
+
1004
+
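+ # Note on tf_to_pt_weight_rename above: it helps realize the "weights tied to the adaptive
+ # input embeddings" behaviour of this class. When `tie_word_embeddings` is set, the
+ # adaptive-softmax output layers are mapped onto `transformer.word_emb.emb_layers`, and
+ # `tie_projs` similarly aliases the output projections onto the embedding projections, so
+ # cross-framework weight loading resolves the tied parameters to a single shared set.
+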
1005
+ @add_start_docstrings(
1006
+ """
1007
+ The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
1008
+
1009
+ [`TFTransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
1010
+ models (e.g. GPT-1, GPT-2) do.
1011
+
1012
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1013
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1014
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1015
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1016
+ each row of the batch).
1017
+ """,
1018
+ TRANSFO_XL_START_DOCSTRING,
1019
+ )
1020
+ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenceClassificationLoss):
1021
+ def __init__(self, config, *inputs, **kwargs):
1022
+ super().__init__(config, *inputs, **kwargs)
1023
+ self.num_labels = config.num_labels
1024
+ self.score = keras.layers.Dense(
1025
+ config.num_labels,
1026
+ kernel_initializer=get_initializer(config.init_range),
1027
+ name="score",
1028
+ use_bias=False,
1029
+ )
1030
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
1031
+
1032
+ def get_output_embeddings(self):
1033
+ # Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
1034
+ logger.warning(
1035
+ "Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
1036
+ "in transformers v4.32."
1037
+ )
1038
+ return self.transformer.word_emb
1039
+
1040
+ @unpack_inputs
1041
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
1042
+ @add_code_sample_docstrings(
1043
+ checkpoint=_CHECKPOINT_FOR_DOC,
1044
+ output_type=TFTransfoXLSequenceClassifierOutputWithPast,
1045
+ config_class=_CONFIG_FOR_DOC,
1046
+ )
1047
+ def call(
1048
+ self,
1049
+ input_ids: TFModelInputType | None = None,
1050
+ mems: List[tf.Tensor] | None = None,
1051
+ head_mask: np.ndarray | tf.Tensor | None = None,
1052
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1053
+ output_attentions: Optional[bool] = None,
1054
+ output_hidden_states: Optional[bool] = None,
1055
+ return_dict: Optional[bool] = None,
1056
+ labels: np.ndarray | tf.Tensor | None = None,
1057
+ training: Optional[bool] = False,
1058
+ ) -> Union[Tuple, TFTransfoXLSequenceClassifierOutputWithPast]:
1059
+ r"""
1060
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1061
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1062
+ config.vocab_size - 1]`.
1063
+ """
1064
+ transformer_outputs = self.transformer(
1065
+ input_ids=input_ids,
1066
+ mems=mems,
1067
+ head_mask=head_mask,
1068
+ inputs_embeds=inputs_embeds,
1069
+ output_attentions=output_attentions,
1070
+ output_hidden_states=output_hidden_states,
1071
+ return_dict=return_dict,
1072
+ training=training,
1073
+ )
1074
+
1075
+ hidden_states = transformer_outputs[0]
1076
+ logits = self.score(hidden_states)
1077
+ in_logits = None
1078
+ if self.config.pad_token_id is None:
1079
+ sequence_lengths = -1
1080
+ else:
1081
+ if input_ids is not None:
1082
+ sequence_lengths = (
1083
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
1084
+ - 1
1085
+ )
1086
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
1087
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
1088
+ else:
1089
+ sequence_lengths = -1
1090
+ logger.warning(
1091
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1092
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1093
+ )
1094
+ loss = None
1095
+
1096
+ if labels is not None:
1097
+ if input_ids is not None:
1098
+ batch_size, sequence_length = shape_list(input_ids)[:2]
1099
+ else:
1100
+ batch_size, sequence_length = shape_list(inputs_embeds)[:2]
1101
+ assert (
1102
+ self.config.pad_token_id is not None or batch_size == 1
1103
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1104
+
1105
+ if not tf.is_tensor(sequence_lengths):
1106
+ in_logits = logits[0:batch_size, sequence_lengths]
1107
+
1108
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
1109
+
1110
+ pooled_logits = in_logits if in_logits is not None else logits
1111
+
1112
+ if not return_dict:
1113
+ output = (pooled_logits,) + transformer_outputs[1:]
1114
+ return ((loss,) + output) if loss is not None else output
1115
+
1116
+ return TFTransfoXLSequenceClassifierOutputWithPast(
1117
+ loss=loss,
1118
+ logits=pooled_logits,
1119
+ mems=transformer_outputs.mems,
1120
+ hidden_states=transformer_outputs.hidden_states,
1121
+ attentions=transformer_outputs.attentions,
1122
+ )
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py ADDED
@@ -0,0 +1,179 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ A TF 2.0 Adaptive Softmax for Transformer XL model.
18
+ """
19
+
20
+
21
+ import tensorflow as tf
22
+
23
+ from ....modeling_tf_utils import keras
24
+ from ....tf_utils import shape_list
25
+
26
+
27
+ class TFAdaptiveSoftmaxMask(keras.layers.Layer):
28
+ def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
29
+ super().__init__(**kwargs)
30
+
31
+ self.vocab_size = vocab_size
32
+ self.d_embed = d_embed
33
+ self.d_proj = d_proj
34
+
35
+ self.cutoffs = cutoffs + [vocab_size]
36
+ self.cutoff_ends = [0] + self.cutoffs
37
+ self.div_val = div_val
38
+
39
+ self.shortlist_size = self.cutoffs[0]
40
+ self.n_clusters = len(self.cutoffs) - 1
41
+ self.head_size = self.shortlist_size + self.n_clusters
42
+ self.keep_order = keep_order
43
+
44
+ self.out_layers = []
45
+ self.out_projs = []
46
+
47
+ def build(self, input_shape):
48
+ if self.n_clusters > 0:
49
+ self.cluster_weight = self.add_weight(
50
+ shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
51
+ )
52
+ self.cluster_bias = self.add_weight(
53
+ shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
54
+ )
55
+
56
+ if self.div_val == 1:
57
+ for i in range(len(self.cutoffs)):
58
+ if self.d_proj != self.d_embed:
59
+ weight = self.add_weight(
60
+ shape=(self.d_embed, self.d_proj),
61
+ initializer="zeros",
62
+ trainable=True,
63
+ name=f"out_projs_._{i}",
64
+ )
65
+ self.out_projs.append(weight)
66
+ else:
67
+ self.out_projs.append(None)
68
+ weight = self.add_weight(
69
+ shape=(self.vocab_size, self.d_embed),
70
+ initializer="zeros",
71
+ trainable=True,
72
+ name=f"out_layers_._{i}_._weight",
73
+ )
74
+ bias = self.add_weight(
75
+ shape=(self.vocab_size,),
76
+ initializer="zeros",
77
+ trainable=True,
78
+ name=f"out_layers_._{i}_._bias",
79
+ )
80
+ self.out_layers.append((weight, bias))
81
+ else:
82
+ for i in range(len(self.cutoffs)):
83
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
84
+ d_emb_i = self.d_embed // (self.div_val**i)
85
+
86
+ weight = self.add_weight(
87
+ shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
88
+ )
89
+ self.out_projs.append(weight)
90
+ weight = self.add_weight(
91
+ shape=(r_idx - l_idx, d_emb_i),
92
+ initializer="zeros",
93
+ trainable=True,
94
+ name=f"out_layers_._{i}_._weight",
95
+ )
96
+ bias = self.add_weight(
97
+ shape=(r_idx - l_idx,),
98
+ initializer="zeros",
99
+ trainable=True,
100
+ name=f"out_layers_._{i}_._bias",
101
+ )
102
+ self.out_layers.append((weight, bias))
103
+ super().build(input_shape)
104
+
105
+ @staticmethod
106
+ def _logit(x, W, b, proj=None):
107
+ y = x
108
+ if proj is not None:
109
+ y = tf.einsum("ibd,ed->ibe", y, proj)
110
+ return tf.einsum("ibd,nd->ibn", y, W) + b
111
+
112
+ @staticmethod
113
+ def _gather_logprob(logprob, target):
114
+ lp_size = shape_list(logprob)
115
+ r = tf.range(lp_size[0], dtype=target.dtype)
116
+ idx = tf.stack([r, target], 1)
117
+ return tf.gather_nd(logprob, idx)
118
+
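+ # _gather_logprob above simply picks logprob[k, target[k]] for every row k, i.e. the
+ # log-probability each example assigns to its gold token; `call` uses it to assemble the
+ # per-token loss cluster by cluster.
+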
119
+ def call(self, hidden, target, return_mean=True, training=False):
120
+ head_logprob = 0
121
+ if self.n_clusters == 0:
122
+ output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
123
+ if target is not None:
124
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
125
+ out = tf.nn.log_softmax(output, axis=-1)
126
+ else:
127
+ hidden_sizes = shape_list(hidden)
128
+ out = []
129
+ loss = tf.zeros(hidden_sizes[:2])
130
+ for i in range(len(self.cutoffs)):
131
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
132
+ if target is not None:
133
+ mask = (target >= l_idx) & (target < r_idx)
134
+ mask_idx = tf.where(mask)
135
+ cur_target = tf.boolean_mask(target, mask) - l_idx
136
+
137
+ if self.div_val == 1:
138
+ cur_W = self.out_layers[0][0][l_idx:r_idx]
139
+ cur_b = self.out_layers[0][1][l_idx:r_idx]
140
+ else:
141
+ cur_W = self.out_layers[i][0]
142
+ cur_b = self.out_layers[i][1]
143
+
144
+ if i == 0:
145
+ cur_W = tf.concat([cur_W, self.cluster_weight], 0)
146
+ cur_b = tf.concat([cur_b, self.cluster_bias], 0)
147
+
148
+ head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
149
+ head_logprob = tf.nn.log_softmax(head_logit)
150
+ out.append(head_logprob[..., : self.cutoffs[0]])
151
+ if target is not None:
152
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
153
+ cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
154
+ else:
155
+ tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
156
+ tail_logprob = tf.nn.log_softmax(tail_logit)
157
+ cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
158
+ logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
159
+ out.append(logprob_i)
160
+ if target is not None:
161
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
162
+ cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
163
+ cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
164
+ cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
165
+ if target is not None:
166
+ loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
167
+ out = tf.concat(out, axis=-1)
168
+
169
+ if target is not None:
170
+ if return_mean:
171
+ loss = tf.reduce_mean(loss)
172
+ # Add the training-time loss value to the layer using `self.add_loss()`.
173
+ self.add_loss(loss)
174
+
175
+ # Log the loss as a metric (we could log arbitrary metrics,
176
+ # including different metrics for training and inference).
177
+ self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
178
+
179
+ return out
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py ADDED
@@ -0,0 +1,1295 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular
18
+ https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
19
+ """
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ....modeling_utils import PreTrainedModel
29
+ from ....utils import (
30
+ ModelOutput,
31
+ add_code_sample_docstrings,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ )
36
+ from .configuration_transfo_xl import TransfoXLConfig
37
+ from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
43
+ _CONFIG_FOR_DOC = "TransfoXLConfig"
44
+
45
+
46
+ from .._archive_maps import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
47
+
48
+
49
+ def build_tf_to_pytorch_map(model, config):
50
+ """
51
+ A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original
52
+ PyTorch model as possible.
53
+ """
54
+ tf_to_pt_map = {}
55
+
56
+ if hasattr(model, "transformer"):
57
+ # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
58
+ tf_to_pt_map.update(
59
+ {
60
+ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
61
+ "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
62
+ }
63
+ )
64
+ for i, (out_l, proj_l, tie_proj) in enumerate(
65
+ zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
66
+ ):
67
+ layer_str = f"transformer/adaptive_softmax/cutoff_{i}/"
68
+ if config.tie_word_embeddings:
69
+ tf_to_pt_map.update({layer_str + "b": out_l.bias})
70
+ else:
71
+ raise NotImplementedError
72
+ # I don't think this is implemented in the TF code
73
+ tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
74
+ if not tie_proj:
75
+ tf_to_pt_map.update({layer_str + "proj": proj_l})
76
+ # Now load the rest of the transformer
77
+ model = model.transformer
78
+
79
+ # Embeddings
80
+ for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
81
+ layer_str = f"transformer/adaptive_embed/cutoff_{i}/"
82
+ tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
83
+
84
+ # Transformer blocks
85
+ for i, b in enumerate(model.layers):
86
+ layer_str = f"transformer/layer_{i}/"
87
+ tf_to_pt_map.update(
88
+ {
89
+ layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
90
+ layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
91
+ layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
92
+ layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
93
+ layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
94
+ layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
95
+ layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
96
+ layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
97
+ layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
98
+ layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
99
+ layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
100
+ }
101
+ )
102
+
103
+ # Relative positioning biases
104
+ if config.untie_r:
105
+ r_r_list = []
106
+ r_w_list = []
107
+ for b in model.layers:
108
+ r_r_list.append(b.dec_attn.r_r_bias)
109
+ r_w_list.append(b.dec_attn.r_w_bias)
110
+ else:
111
+ r_r_list = [model.r_r_bias]
112
+ r_w_list = [model.r_w_bias]
113
+ tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
114
+ return tf_to_pt_map
115
+
116
+
117
+ def load_tf_weights_in_transfo_xl(model, config, tf_path):
118
+ """Load tf checkpoints in a pytorch model"""
119
+ try:
120
+ import numpy as np
121
+ import tensorflow as tf
122
+ except ImportError:
123
+ logger.error(
124
+ "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
125
+ "https://www.tensorflow.org/install/ for installation instructions."
126
+ )
127
+ raise
128
+ # Build TF to PyTorch weights loading map
129
+ tf_to_pt_map = build_tf_to_pytorch_map(model, config)
130
+
131
+ # Load weights from TF model
132
+ init_vars = tf.train.list_variables(tf_path)
133
+ tf_weights = {}
134
+ for name, shape in init_vars:
135
+ logger.info(f"Loading TF weight {name} with shape {shape}")
136
+ array = tf.train.load_variable(tf_path, name)
137
+ tf_weights[name] = array
138
+
139
+ for name, pointer in tf_to_pt_map.items():
140
+ assert name in tf_weights
141
+ array = tf_weights[name]
142
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
143
+ # which are not required for using the pretrained model
144
+ if "kernel" in name or "proj" in name:
145
+ array = np.transpose(array)
146
+ if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
147
+ # Here we will split the TF weights
148
+ assert len(pointer) == array.shape[0]
149
+ for i, p_i in enumerate(pointer):
150
+ arr_i = array[i, ...]
151
+ try:
152
+ assert p_i.shape == arr_i.shape
153
+ except AssertionError as e:
154
+ e.args += (p_i.shape, arr_i.shape)
155
+ raise
156
+ logger.info(f"Initialize PyTorch weight {name} for layer {i}")
157
+ p_i.data = torch.from_numpy(arr_i)
158
+ else:
159
+ try:
160
+ assert (
161
+ pointer.shape == array.shape
162
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
163
+ except AssertionError as e:
164
+ e.args += (pointer.shape, array.shape)
165
+ raise
166
+ logger.info(f"Initialize PyTorch weight {name}")
167
+ pointer.data = torch.from_numpy(array)
168
+ tf_weights.pop(name, None)
169
+ tf_weights.pop(name + "/Adam", None)
170
+ tf_weights.pop(name + "/Adam_1", None)
171
+
172
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
173
+ return model
174
+
175
+
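+ # Illustrative conversion sketch (the checkpoint path is a placeholder, not a real file):
+ #
+ #     config = TransfoXLConfig()
+ #     model = TransfoXLLMHeadModel(config)  # defined further below in this module
+ #     model = load_tf_weights_in_transfo_xl(model, config, "/path/to/tf_checkpoint")
+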
176
+ class PositionalEmbedding(nn.Module):
177
+ def __init__(self, demb):
178
+ super().__init__()
179
+
180
+ self.demb = demb
181
+
182
+ inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
183
+ self.register_buffer("inv_freq", inv_freq)
184
+
185
+ def forward(self, pos_seq, bsz=None):
186
+ sinusoid_inp = torch.outer(pos_seq, self.inv_freq)
187
+ pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
188
+
189
+ if bsz is not None:
190
+ return pos_emb[:, None, :].expand(-1, bsz, -1)
191
+ else:
192
+ return pos_emb[:, None, :]
193
+
194
+
195
+ class PositionwiseFF(nn.Module):
196
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
197
+ super().__init__()
198
+
199
+ self.d_model = d_model
200
+ self.d_inner = d_inner
201
+ self.dropout = dropout
202
+
203
+ self.CoreNet = nn.Sequential(
204
+ nn.Linear(d_model, d_inner),
205
+ nn.ReLU(inplace=True),
206
+ nn.Dropout(dropout),
207
+ nn.Linear(d_inner, d_model),
208
+ nn.Dropout(dropout),
209
+ )
210
+
211
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
212
+
213
+ self.pre_lnorm = pre_lnorm
214
+
215
+ def forward(self, inp):
216
+ if self.pre_lnorm:
217
+ # layer normalization + positionwise feed-forward
218
+ core_out = self.CoreNet(self.layer_norm(inp))
219
+
220
+ # residual connection
221
+ output = core_out + inp
222
+ else:
223
+ # positionwise feed-forward
224
+ core_out = self.CoreNet(inp)
225
+
226
+ # residual connection + layer normalization
227
+ output = self.layer_norm(inp + core_out)
228
+
229
+ return output
230
+
231
+
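A quick shape check of the feed-forward block, assuming the class above is in scope; the sizes are arbitrary and the weights are random, so only the shapes are meaningful:

```python
# Sketch only: verifies that the residual block preserves the (seq, batch, d_model) shape.
import torch

ff = PositionwiseFF(d_model=16, d_inner=64, dropout=0.1, pre_lnorm=False)
x = torch.randn(10, 2, 16)     # (seq_len, batch, d_model) -- the layout used in this file
print(ff(x).shape)             # torch.Size([10, 2, 16])
```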
232
+ class RelPartialLearnableMultiHeadAttn(nn.Module):
233
+ def __init__(
234
+ self,
235
+ n_head,
236
+ d_model,
237
+ d_head,
238
+ dropout,
239
+ dropatt=0,
240
+ pre_lnorm=False,
241
+ r_r_bias=None,
242
+ r_w_bias=None,
243
+ layer_norm_epsilon=1e-5,
244
+ ):
245
+ super().__init__()
246
+
247
+ self.n_head = n_head
248
+ self.d_model = d_model
249
+ self.d_head = d_head
250
+ self.dropout = dropout
251
+
252
+ self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
253
+
254
+ self.drop = nn.Dropout(dropout)
255
+ self.dropatt = nn.Dropout(dropatt)
256
+ self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
257
+
258
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
259
+
260
+ self.scale = 1 / (d_head**0.5)
261
+
262
+ self.pre_lnorm = pre_lnorm
263
+
264
+ if r_r_bias is None or r_w_bias is None: # Biases are not shared
265
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
266
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
267
+ else:
268
+ self.r_r_bias = r_r_bias
269
+ self.r_w_bias = r_w_bias
270
+
271
+ self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
272
+
273
+ def _rel_shift(self, x):
274
+ zero_pad_shape = (x.size(0), 1) + x.size()[2:]
275
+ zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
276
+ x_padded = torch.cat([zero_pad, x], dim=1)
277
+
278
+ x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
279
+ x_padded = x_padded.view(*x_padded_shape)
280
+
281
+ x = x_padded[1:].view_as(x)
282
+
283
+ return x
284
+
285
+ def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False):
286
+ qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
287
+
288
+ if mems is not None:
289
+ cat = torch.cat([mems, w], 0)
290
+ if self.pre_lnorm:
291
+ w_heads = self.qkv_net(self.layer_norm(cat))
292
+ else:
293
+ w_heads = self.qkv_net(cat)
294
+ r_head_k = self.r_net(r)
295
+
296
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
297
+ w_head_q = w_head_q[-qlen:]
298
+ else:
299
+ if self.pre_lnorm:
300
+ w_heads = self.qkv_net(self.layer_norm(w))
301
+ else:
302
+ w_heads = self.qkv_net(w)
303
+ r_head_k = self.r_net(r)
304
+
305
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
306
+
307
+ klen = w_head_k.size(0)
308
+
309
+ w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
310
+ w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head
311
+ w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head
312
+
313
+ r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # rlen x n_head x d_head
314
+
315
+ # compute attention score
316
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
317
+ AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
318
+
319
+ rr_head_q = w_head_q + self.r_r_bias
320
+ BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
321
+ BD = self._rel_shift(BD)
322
+
323
+ # [qlen x klen x bsz x n_head]
324
+ attn_score = AC + BD
325
+ attn_score.mul_(self.scale)
326
+
327
+ mask_value = torch.finfo(attn_score.dtype).min
328
+
329
+ # compute attention probability
330
+ if attn_mask is not None and torch.sum(attn_mask).item():
331
+ attn_mask = attn_mask == 1 # Switch to bool
332
+ if attn_mask.dim() == 2:
333
+ attn_score = (
334
+ attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score)
335
+ )
336
+ elif attn_mask.dim() == 3:
337
+ attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score)
338
+
339
+ # [qlen x klen x bsz x n_head]
340
+ attn_prob = nn.functional.softmax(attn_score, dim=1)
341
+ attn_prob = self.dropatt(attn_prob)
342
+
343
+ # Mask heads if we want to
344
+ if head_mask is not None:
345
+ attn_prob = attn_prob * head_mask
346
+
347
+ # compute attention vector
348
+ attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
349
+
350
+ # [qlen x bsz x n_head x d_head]
351
+ attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
352
+
353
+ # linear projection
354
+ attn_out = self.o_net(attn_vec)
355
+ attn_out = self.drop(attn_out)
356
+
357
+ if self.pre_lnorm:
358
+ # residual connection
359
+ outputs = [w + attn_out]
360
+ else:
361
+ # residual connection + layer normalization
362
+ outputs = [self.layer_norm(w + attn_out)]
363
+
364
+ if output_attentions:
365
+ outputs.append(attn_prob)
366
+
367
+ return outputs
368
+
369
+
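The `_rel_shift` pad-reshape-slice trick above is the part that usually needs a picture. A standalone sketch on a tiny tensor (batch and head dimensions set to 1) reproduces the same steps:

```python
# Standalone illustration of the _rel_shift trick; values are just 0..8 for readability.
import torch

qlen, klen = 3, 3
x = torch.arange(qlen * klen, dtype=torch.float).view(qlen, klen, 1, 1)

zero_pad = torch.zeros((qlen, 1, 1, 1), dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)        # (qlen, klen + 1, 1, 1)
x_padded = x_padded.view(klen + 1, qlen, 1, 1)    # reinterpret the flat memory layout
shifted = x_padded[1:].view_as(x)                 # drop one row, reshape back

print(x[..., 0, 0])        # tensor([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]])
print(shifted[..., 0, 0])  # tensor([[2., 0., 3.], [4., 5., 0.], [6., 7., 8.]])
# Row i ends up shifted left by (qlen - 1 - i); the positions that wrap around are
# exactly the ones removed later by the causal attention mask.
```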
370
+ class RelPartialLearnableDecoderLayer(nn.Module):
371
+ def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
372
+ super().__init__()
373
+
374
+ self.dec_attn = RelPartialLearnableMultiHeadAttn(
375
+ n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
376
+ )
377
+ self.pos_ff = PositionwiseFF(
378
+ d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
379
+ )
380
+
381
+ def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False):
382
+ attn_outputs = self.dec_attn(
383
+ dec_inp,
384
+ r,
385
+ attn_mask=dec_attn_mask,
386
+ mems=mems,
387
+ head_mask=head_mask,
388
+ output_attentions=output_attentions,
389
+ )
390
+ ff_output = self.pos_ff(attn_outputs[0])
391
+
392
+ outputs = [ff_output] + attn_outputs[1:]
393
+
394
+ return outputs
395
+
396
+
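Putting the two sub-modules together, a shape-only forward pass through one decoder layer; all sizes are arbitrary, the weights (including the relative-position biases) are uninitialized, and no memory segment is used:

```python
# Sketch only: checks the (qlen, bsz, d_model) -> (qlen, bsz, d_model) contract of one layer.
import torch

layer = RelPartialLearnableDecoderLayer(
    n_head=2, d_model=16, d_head=8, d_inner=32, dropout=0.1, dropatt=0.1, pre_lnorm=False
)

qlen, bsz = 4, 2
w = torch.randn(qlen, bsz, 16)   # token stream
r = torch.randn(qlen, 1, 16)     # relative positional embeddings for klen == qlen (no mems)

out = layer(w, r)
print(out[0].shape)              # torch.Size([4, 2, 16])
```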
397
+ class AdaptiveEmbedding(nn.Module):
398
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
399
+ super().__init__()
400
+
401
+ self.n_token = n_token
402
+ self.d_embed = d_embed
403
+
404
+ self.cutoffs = cutoffs + [n_token]
405
+ self.div_val = div_val
406
+ self.d_proj = d_proj
407
+
408
+ self.emb_scale = d_proj**0.5
409
+
410
+ self.cutoff_ends = [0] + self.cutoffs
411
+
412
+ self.emb_layers = nn.ModuleList()
413
+ self.emb_projs = nn.ParameterList()
414
+ if div_val == 1:
415
+ self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
416
+ if d_proj != d_embed:
417
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
418
+ else:
419
+ for i in range(len(self.cutoffs)):
420
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
421
+ d_emb_i = d_embed // (div_val**i)
422
+ self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
423
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
424
+
425
+ def forward(self, inp):
426
+ if self.div_val == 1:
427
+ embed = self.emb_layers[0](inp)
428
+ if self.d_proj != self.d_embed:
429
+ embed = nn.functional.linear(embed, self.emb_projs[0])
430
+ else:
431
+ param = next(self.parameters())
432
+ inp_flat = inp.view(-1)
433
+ emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
434
+ for i in range(len(self.cutoffs)):
435
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
436
+
437
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
438
+ indices_i = mask_i.nonzero().squeeze()
439
+
440
+ if indices_i.numel() == 0:
441
+ continue
442
+
443
+ inp_i = inp_flat.index_select(0, indices_i) - l_idx
444
+ emb_i = self.emb_layers[i](inp_i)
445
+ emb_i = nn.functional.linear(emb_i, self.emb_projs[i])
446
+
447
+ emb_flat.index_copy_(0, indices_i, emb_i)
448
+
449
+ embed_shape = inp.size() + (self.d_proj,)
450
+ embed = emb_flat.view(embed_shape)
451
+
452
+ embed.mul_(self.emb_scale)
453
+
454
+ return embed
455
+
456
+
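To make the bucket routing above concrete, a small sketch with made-up cutoffs: ids below the first cutoff use the widest embedding, later buckets use progressively narrower ones, and everything is projected back to `d_proj`:

```python
# Sketch only: the vocabulary size, cutoffs and ids are arbitrary placeholders.
import torch

emb = AdaptiveEmbedding(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=2)

# Two ids per bucket: [0, 100), [100, 500), [500, 1000)
ids = torch.tensor([[3, 7, 150, 160, 900, 901]])
print(emb(ids).shape)   # torch.Size([1, 6, 32]) -- every bucket is projected to d_proj
```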
457
+ class TransfoXLPreTrainedModel(PreTrainedModel):
458
+ """
459
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
460
+ models.
461
+ """
462
+
463
+ config_class = TransfoXLConfig
464
+ load_tf_weights = load_tf_weights_in_transfo_xl
465
+ base_model_prefix = "transformer"
466
+
467
+ def _init_weight(self, weight):
468
+ if self.config.init == "uniform":
469
+ nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
470
+ elif self.config.init == "normal":
471
+ nn.init.normal_(weight, 0.0, self.config.init_std)
472
+
473
+ def _init_bias(self, bias):
474
+ nn.init.constant_(bias, 0.0)
475
+
476
+ def _init_weights(self, m):
477
+ """Initialize the weights."""
478
+ classname = m.__class__.__name__
479
+ if classname.find("Linear") != -1:
480
+ if hasattr(m, "weight") and m.weight is not None:
481
+ self._init_weight(m.weight)
482
+ if hasattr(m, "bias") and m.bias is not None:
483
+ self._init_bias(m.bias)
484
+ elif classname.find("AdaptiveEmbedding") != -1:
485
+ if hasattr(m, "emb_projs"):
486
+ for i in range(len(m.emb_projs)):
487
+ if m.emb_projs[i] is not None:
488
+ nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
489
+ elif classname.find("Embedding") != -1:
490
+ if hasattr(m, "weight"):
491
+ self._init_weight(m.weight)
492
+ elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
493
+ if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
494
+ self._init_weight(m.cluster_weight)
495
+ if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
496
+ self._init_bias(m.cluster_bias)
497
+ if hasattr(m, "out_projs"):
498
+ for i in range(len(m.out_projs)):
499
+ if m.out_projs[i] is not None:
500
+ nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
501
+ elif classname.find("LayerNorm") != -1:
502
+ if hasattr(m, "weight"):
503
+ nn.init.normal_(m.weight, 1.0, self.config.init_std)
504
+ if hasattr(m, "bias") and m.bias is not None:
505
+ self._init_bias(m.bias)
506
+ else:
507
+ if hasattr(m, "r_emb"):
508
+ self._init_weight(m.r_emb)
509
+ if hasattr(m, "r_w_bias"):
510
+ self._init_weight(m.r_w_bias)
511
+ if hasattr(m, "r_r_bias"):
512
+ self._init_weight(m.r_r_bias)
513
+ if hasattr(m, "r_bias"):
514
+ self._init_bias(m.r_bias)
515
+
516
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):
517
+ """
518
+ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying
519
+ weights embeddings afterwards if the model class has a *tie_weights()* method.
520
+
521
+ Arguments:
522
+ new_num_tokens: (*optional*) int:
523
+ New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
524
+ the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and
525
+ just returns a pointer to the input tokens `torch.nn.Embeddings` Module of the model.
526
+ layer: (*optional*) int:
527
+ Layer of the *AdaptiveEmbedding* where the resizing should be done. By default, the last layer will be
528
+ resized. Be aware that when resizing other than the last layer, you have to ensure that the new
529
+ token(s) in the tokenizer are at the corresponding position.
530
+
531
+ Return: `torch.nn.Embeddings` Pointer to the input tokens Embeddings Module of the model
532
+ """
533
+ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
534
+
535
+ if new_num_tokens is None:
536
+ return self.get_input_embeddings()
537
+
538
+ new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer)
539
+ assert new_num_tokens_layer > 0, "The size of the new embedding layer cannot be 0 or less"
540
+ model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer)
541
+
542
+ # Update base model and current model config
543
+ self.config.vocab_size = new_num_tokens
544
+ base_model.vocab_size = new_num_tokens
545
+ base_model.n_token = new_num_tokens
546
+
547
+ new_embedding_shapes = self._get_embedding_shapes()
548
+ self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer)
549
+
550
+ # Tie weights again if needed
551
+ self.tie_weights()
552
+
553
+ return model_embeds
554
+
555
+ def _get_new_num_tokens_layer(self, new_num_tokens, layer):
556
+ embeddings = self.get_input_embeddings()
557
+ if layer == -1:
558
+ layer = len(embeddings.emb_layers) - 1
559
+ assert 0 <= layer <= len(embeddings.emb_layers) - 1
560
+
561
+ new_num_tokens_layer = (
562
+ new_num_tokens
563
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
564
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
565
+ )
566
+ return new_num_tokens_layer, layer
567
+
568
+ def _get_embedding_shapes(self):
569
+ embeddings = self.get_input_embeddings()
570
+ return [emb.weight.shape[0] for emb in embeddings.emb_layers]
571
+
572
+ def _resize_token_embeddings(self, new_num_tokens, layer=-1):
573
+ embeddings = self.get_input_embeddings()
574
+ if new_num_tokens is None:
575
+ return embeddings
576
+ new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens)
577
+ embeddings.emb_layers[layer] = new_embeddings_layer
578
+
579
+ self.set_input_embeddings(embeddings)
580
+
581
+ return self.get_input_embeddings()
582
+
583
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
584
+ embeddings = self.get_input_embeddings()
585
+
586
+ for i in range(layer, len(embeddings.cutoffs)):
587
+ embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1])
588
+
589
+ embeddings.cutoff_ends = [0] + embeddings.cutoffs
590
+ embeddings.n_token = new_num_tokens
591
+
592
+ self.config.cutoffs = embeddings.cutoffs[:-1]
593
+
594
+ return embeddings.cutoffs
595
+
596
+
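A hedged sketch of the resizing path above, using a deliberately tiny configuration (all hyperparameters below are placeholders): growing the vocabulary by ten tokens resizes only the last adaptive bucket and the matching cutoff.

```python
# Sketch under assumed toy hyperparameters; not a recommended configuration.
from transformers import TransfoXLConfig, TransfoXLLMHeadModel

config = TransfoXLConfig(
    vocab_size=1000, cutoffs=[100, 500], div_val=2,
    d_model=32, d_embed=32, n_head=2, d_head=16, d_inner=64, n_layer=2,
)
model = TransfoXLLMHeadModel(config)

model.resize_token_embeddings(new_num_tokens=1010, layer=-1)
print(model.config.vocab_size)                                  # 1010
print(model.transformer.word_emb.emb_layers[-1].weight.shape)   # torch.Size([510, 8])
```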
597
+ @dataclass
598
+ class TransfoXLModelOutput(ModelOutput):
599
+ """
600
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
601
+
602
+ Args:
603
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
604
+ Sequence of hidden-states at the output of the last layer of the model.
605
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
606
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
607
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
608
+ be passed as input ids as they have already been computed.
609
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
610
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
611
+ shape `(batch_size, sequence_length, hidden_size)`.
612
+
613
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
614
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
615
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
616
+ sequence_length)`.
617
+
618
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
619
+ heads.
620
+ """
621
+
622
+ last_hidden_state: torch.FloatTensor
623
+ mems: List[torch.FloatTensor] = None
624
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
625
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
626
+
627
+
628
+ @dataclass
629
+ class TransfoXLSequenceClassifierOutputWithPast(ModelOutput):
630
+ """
631
+ Base class for outputs of sentence classification models.
632
+
633
+ Args:
634
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
635
+ Classification (or regression if config.num_labels==1) loss.
636
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
637
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
638
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
639
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
640
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
641
+ be passed as input ids as they have already been computed.
642
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
643
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
644
+ shape `(batch_size, sequence_length, hidden_size)`.
645
+
646
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
647
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
648
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
649
+ sequence_length)`.
650
+
651
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
652
+ heads.
653
+ """
654
+
655
+ loss: Optional[torch.FloatTensor] = None
656
+ logits: torch.FloatTensor = None
657
+ mems: List[torch.FloatTensor] = None
658
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
659
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
660
+
661
+
662
+ @dataclass
663
+ class TransfoXLLMHeadModelOutput(ModelOutput):
664
+ """
665
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
666
+
667
+ Args:
668
+ losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
669
+ Language modeling losses (not reduced).
670
+ prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
671
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
672
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
673
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
674
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
675
+ be passed as input ids as they have already been computed.
676
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
677
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
678
+ shape `(batch_size, sequence_length, hidden_size)`.
679
+
680
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
681
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
682
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
683
+ sequence_length)`.
684
+
685
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
686
+ heads.
687
+ loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided):
688
+ Reduced language modeling loss.
689
+ """
690
+
691
+ losses: Optional[torch.FloatTensor] = None
692
+ prediction_scores: torch.FloatTensor = None
693
+ mems: List[torch.FloatTensor] = None
694
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
695
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
696
+ loss: Optional[torch.FloatTensor] = None
697
+
698
+ @property
699
+ def logits(self):
700
+ # prediction scores are the output of the adaptive softmax, see
701
+ # the file `modeling_transfo_xl_utilities`. Since the adaptive
702
+ # softmax returns the log softmax value, `self.prediction_scores`
703
+ # are strictly speaking not exactly `logits`, but behave the same
704
+ # way logits do.
705
+ return self.prediction_scores
706
+
707
+
708
+ TRANSFO_XL_START_DOCSTRING = r"""
709
+
710
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
711
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
712
+ etc.)
713
+
714
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
715
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
716
+ and behavior.
717
+
718
+ Parameters:
719
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
720
+ Initializing with a config file does not load the weights associated with the model, only the
721
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
722
+ """
723
+
724
+ TRANSFO_XL_INPUTS_DOCSTRING = r"""
725
+ Args:
726
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
727
+ Indices of input sequence tokens in the vocabulary.
728
+
729
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
730
+ [`PreTrainedTokenizer.__call__`] for details.
731
+
732
+ [What are input IDs?](../glossary#input-ids)
733
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
734
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
735
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
736
+ given to this model should not be passed as `input_ids` as they have already been computed.
737
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
738
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
739
+
740
+ - 1 indicates the head is **not masked**,
741
+ - 0 indicates the head is **masked**.
742
+
743
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
744
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
745
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
746
+ model's internal embedding lookup matrix.
747
+ output_attentions (`bool`, *optional*):
748
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
749
+ tensors for more detail.
750
+ output_hidden_states (`bool`, *optional*):
751
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
752
+ more detail.
753
+ return_dict (`bool`, *optional*):
754
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
755
+ """
756
+
757
+
758
+ @add_start_docstrings(
759
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
760
+ TRANSFO_XL_START_DOCSTRING,
761
+ )
762
+ class TransfoXLModel(TransfoXLPreTrainedModel):
763
+ def __init__(self, config):
764
+ super().__init__(config)
765
+
766
+ self.n_token = config.vocab_size
767
+
768
+ self.d_embed = config.d_embed
769
+ self.d_model = config.d_model
770
+ self.n_head = config.n_head
771
+ self.d_head = config.d_head
772
+
773
+ self.word_emb = AdaptiveEmbedding(
774
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
775
+ )
776
+
777
+ self.drop = nn.Dropout(config.dropout)
778
+
779
+ self.n_layer = config.n_layer
780
+ self.mem_len = config.mem_len
781
+ self.attn_type = config.attn_type
782
+
783
+ if not config.untie_r:
784
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
785
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
786
+
787
+ self.layers = nn.ModuleList()
788
+ if config.attn_type == 0: # the default attention
789
+ for i in range(config.n_layer):
790
+ self.layers.append(
791
+ RelPartialLearnableDecoderLayer(
792
+ config.n_head,
793
+ config.d_model,
794
+ config.d_head,
795
+ config.d_inner,
796
+ config.dropout,
797
+ dropatt=config.dropatt,
798
+ pre_lnorm=config.pre_lnorm,
799
+ r_w_bias=None if config.untie_r else self.r_w_bias,
800
+ r_r_bias=None if config.untie_r else self.r_r_bias,
801
+ layer_norm_epsilon=config.layer_norm_epsilon,
802
+ )
803
+ )
804
+ else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
805
+ raise NotImplementedError # Removed them to avoid maintaining dead code
806
+
807
+ self.same_length = config.same_length
808
+ self.clamp_len = config.clamp_len
809
+
810
+ if self.attn_type == 0: # default attention
811
+ self.pos_emb = PositionalEmbedding(self.d_model)
812
+ else: # learnable embeddings and absolute embeddings
813
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
814
+
815
+ # Initialize weights and apply final processing
816
+ self.post_init()
817
+
818
+ def get_input_embeddings(self):
819
+ return self.word_emb
820
+
821
+ def set_input_embeddings(self, new_embeddings):
822
+ self.word_emb = new_embeddings
823
+
824
+ def backward_compatible(self):
825
+ self.sample_softmax = -1
826
+
827
+ def reset_memory_length(self, mem_len):
828
+ self.mem_len = mem_len
829
+
830
+ def _prune_heads(self, heads):
831
+ logger.info("Head pruning is not implemented for Transformer-XL model")
832
+ pass
833
+
834
+ def init_mems(self, bsz):
835
+ if self.mem_len > 0:
836
+ mems = []
837
+ param = next(self.parameters())
838
+ for i in range(self.n_layer):
839
+ empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
840
+ mems.append(empty)
841
+
842
+ return mems
843
+ else:
844
+ return None
845
+
846
+ def _update_mems(self, hids, mems, mlen, qlen):
847
+ # does not deal with None
848
+ if mems is None:
849
+ return None
850
+
851
+ # mems is not None
852
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
853
+
854
+ # There are `mlen + qlen` steps that can be cached into mems
855
+ with torch.no_grad():
856
+ new_mems = []
857
+ end_idx = mlen + max(0, qlen)
858
+ beg_idx = max(0, end_idx - self.mem_len)
859
+ for i in range(len(hids)):
860
+ cat = torch.cat([mems[i], hids[i]], dim=0)
861
+ new_mems.append(cat[beg_idx:end_idx].detach())
862
+
863
+ return new_mems
864
+
865
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
866
+ @add_code_sample_docstrings(
867
+ checkpoint=_CHECKPOINT_FOR_DOC,
868
+ output_type=TransfoXLModelOutput,
869
+ config_class=_CONFIG_FOR_DOC,
870
+ )
871
+ def forward(
872
+ self,
873
+ input_ids: Optional[torch.LongTensor] = None,
874
+ mems: Optional[List[torch.FloatTensor]] = None,
875
+ head_mask: Optional[torch.FloatTensor] = None,
876
+ inputs_embeds: Optional[torch.FloatTensor] = None,
877
+ output_attentions: Optional[bool] = None,
878
+ output_hidden_states: Optional[bool] = None,
879
+ return_dict: Optional[bool] = None,
880
+ ) -> Union[Tuple, TransfoXLModelOutput]:
881
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
882
+ output_hidden_states = (
883
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
884
+ )
885
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
886
+
887
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
888
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
889
+ if input_ids is not None and inputs_embeds is not None:
890
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
891
+ elif input_ids is not None:
892
+ input_ids = input_ids.transpose(0, 1).contiguous()
893
+ qlen, bsz = input_ids.size()
894
+ elif inputs_embeds is not None:
895
+ inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
896
+ qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
897
+ else:
898
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
899
+
900
+ if mems is None:
901
+ mems = self.init_mems(bsz)
902
+
903
+ # Prepare head mask if needed
904
+ # 1.0 in head_mask indicate we keep the head
905
+ # attention_probs has shape bsz x n_heads x N x N
906
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
907
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
908
+ if head_mask is not None:
909
+ if head_mask.dim() == 1:
910
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
911
+ head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
912
+ elif head_mask.dim() == 2:
913
+ head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
914
+ head_mask = head_mask.to(
915
+ dtype=next(self.parameters()).dtype
916
+ ) # switch to float if need + fp16 compatibility
917
+ else:
918
+ head_mask = [None] * self.n_layer
919
+
920
+ if inputs_embeds is not None:
921
+ word_emb = inputs_embeds
922
+ else:
923
+ word_emb = self.word_emb(input_ids)
924
+
925
+ mlen = mems[0].size(0) if mems is not None else 0
926
+ klen = mlen + qlen
927
+ if self.same_length:
928
+ all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool)
929
+ mask_len = klen - self.mem_len
930
+ if mask_len > 0:
931
+ mask_shift_len = qlen - mask_len
932
+ else:
933
+ mask_shift_len = qlen
934
+ dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1
935
+ else:
936
+ dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[
937
+ :, :, None
938
+ ]
939
+
940
+ hids = []
941
+ attentions = [] if output_attentions else None
942
+ if self.attn_type == 0: # default
943
+ pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).to(
944
+ word_emb.dtype
945
+ )
946
+ if self.clamp_len > 0:
947
+ pos_seq.clamp_(max=self.clamp_len)
948
+ pos_emb = self.pos_emb(pos_seq)
949
+
950
+ core_out = self.drop(word_emb)
951
+ pos_emb = self.drop(pos_emb)
952
+
953
+ for i, layer in enumerate(self.layers):
954
+ hids.append(core_out)
955
+ mems_i = None if mems is None else mems[i]
956
+ layer_outputs = layer(
957
+ core_out,
958
+ pos_emb,
959
+ dec_attn_mask=dec_attn_mask,
960
+ mems=mems_i,
961
+ head_mask=head_mask[i],
962
+ output_attentions=output_attentions,
963
+ )
964
+ core_out = layer_outputs[0]
965
+ if output_attentions:
966
+ attentions.append(layer_outputs[1])
967
+ else: # learnable embeddings and absolute embeddings
968
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
969
+
970
+ core_out = self.drop(core_out)
971
+
972
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
973
+
974
+ if output_hidden_states:
975
+ # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
976
+ hids.append(core_out)
977
+ hids = tuple(t.transpose(0, 1).contiguous() for t in hids)
978
+ else:
979
+ hids = None
980
+ if output_attentions:
981
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
982
+ attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
983
+ # We transpose back here to shape [bsz, len, hidden_dim]
984
+ core_out = core_out.transpose(0, 1).contiguous()
985
+
986
+ if not return_dict:
987
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
988
+
989
+ return TransfoXLModelOutput(
990
+ last_hidden_state=core_out,
991
+ mems=new_mems,
992
+ hidden_states=hids,
993
+ attentions=attentions,
994
+ )
995
+
996
+
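A hedged end-to-end sketch of the memory mechanism exposed by this forward pass, assuming the published `transfo-xl/transfo-xl-wt103` checkpoint is available; the token ids are arbitrary placeholders rather than real tokenizer output.

```python
# Sketch: reuse `mems` so the second segment attends over the cached first segment.
import torch
from transformers import TransfoXLModel

model = TransfoXLModel.from_pretrained("transfo-xl/transfo-xl-wt103")

segment_1 = torch.tensor([[14049, 2, 617, 3225]])   # (batch, seq_len), placeholder ids
segment_2 = torch.tensor([[117, 4253, 2, 29]])

out_1 = model(segment_1)                   # out_1.mems holds per-layer cached hidden states
out_2 = model(segment_2, mems=out_1.mems)  # attends over the mem_len cached positions as well

print(out_2.last_hidden_state.shape)             # torch.Size([1, 4, 1024])
print(len(out_2.mems), out_2.mems[0].shape[0])   # n_layer entries, each mem_len steps long
```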
997
+ @add_start_docstrings(
998
+ """
999
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
1000
+ input embeddings)
1001
+ """,
1002
+ TRANSFO_XL_START_DOCSTRING,
1003
+ )
1004
+ class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
1005
+ _tied_weights_keys = [r"crit\.out_projs\.\d+", r"crit\.out_layers\.\d+\.weight"]
1006
+
1007
+ def __init__(self, config):
1008
+ super().__init__(config)
1009
+ self.transformer = TransfoXLModel(config)
1010
+ self.sample_softmax = config.sample_softmax
1011
+ self.trainer_compatible = getattr(config, "trainer_compatible", False)
1012
+
1013
+ if not self.trainer_compatible:
1014
+ warnings.warn(
1015
+ "The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order "
1016
+ "to use that updated output, please specify `trainer_compatible=True` as your configuration"
1017
+ " attribute.",
1018
+ DeprecationWarning,
1019
+ )
1020
+
1021
+ assert self.sample_softmax <= 0, (
1022
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
1023
+ " https://github.com/huggingface/transformers/issues/3310"
1024
+ )
1025
+
1026
+ self.crit = ProjectedAdaptiveLogSoftmax(
1027
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
1028
+ )
1029
+
1030
+ # Initialize weights and apply final processing
1031
+ self.post_init()
1032
+
1033
+ def tie_weights(self):
1034
+ """
1035
+ Run this to be sure output and input (adaptive) softmax weights are tied
1036
+ """
1037
+
1038
+ if self.config.tie_word_embeddings:
1039
+ for i in range(len(self.crit.out_layers)):
1040
+ self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
1041
+ if self.config.tie_projs:
1042
+ for i, tie_proj in enumerate(self.config.tie_projs):
1043
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
1044
+ if self.config.torchscript:
1045
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
1046
+ else:
1047
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
1048
+ elif tie_proj and self.config.div_val != 1:
1049
+ if self.config.torchscript:
1050
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
1051
+ else:
1052
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
1053
+
1054
+ def reset_memory_length(self, mem_len):
1055
+ self.transformer.reset_memory_length(mem_len)
1056
+
1057
+ def init_mems(self, bsz):
1058
+ return self.transformer.init_mems(bsz)
1059
+
1060
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
1061
+ @add_code_sample_docstrings(
1062
+ checkpoint=_CHECKPOINT_FOR_DOC,
1063
+ output_type=TransfoXLLMHeadModelOutput,
1064
+ config_class=_CONFIG_FOR_DOC,
1065
+ )
1066
+ def forward(
1067
+ self,
1068
+ input_ids: Optional[torch.LongTensor] = None,
1069
+ mems: Optional[List[torch.FloatTensor]] = None,
1070
+ head_mask: Optional[torch.FloatTensor] = None,
1071
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1072
+ labels: Optional[torch.LongTensor] = None,
1073
+ output_attentions: Optional[bool] = None,
1074
+ output_hidden_states: Optional[bool] = None,
1075
+ return_dict: Optional[bool] = None,
1076
+ ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:
1077
+ r"""
1078
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1079
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1080
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
1081
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
1082
+ """
1083
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1084
+ if input_ids is not None:
1085
+ bsz, tgt_len = input_ids.size(0), input_ids.size(1)
1086
+ elif inputs_embeds is not None:
1087
+ bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
1088
+ else:
1089
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1090
+
1091
+ transformer_outputs = self.transformer(
1092
+ input_ids,
1093
+ mems=mems,
1094
+ head_mask=head_mask,
1095
+ inputs_embeds=inputs_embeds,
1096
+ output_attentions=output_attentions,
1097
+ output_hidden_states=output_hidden_states,
1098
+ return_dict=return_dict,
1099
+ )
1100
+
1101
+ last_hidden = transformer_outputs[0]
1102
+ pred_hid = last_hidden[:, -tgt_len:]
1103
+
1104
+ if labels is not None:
1105
+ # Prevents all labels from being -100 and throwing an error
1106
+ # when backpropagating the loss
1107
+ miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100
1108
+ if miss_valid_label:
1109
+ # Sets an <EOS> token, just to prevent loss from being NaN
1110
+ labels[0, 1] = self.config.eos_token_id
1111
+
1112
+ softmax_output = self.crit(pred_hid, labels)
1113
+ prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else ()
1114
+
1115
+ if labels is not None:
1116
+ losses = softmax_output.view(bsz, tgt_len - 1)
1117
+ # Avoid incorporating padding (-100) tokens into the loss value
1118
+ loss = losses[losses != 0].mean()
1119
+ else:
1120
+ losses, loss = None, None
1121
+
1122
+ if not return_dict:
1123
+ if self.trainer_compatible:
1124
+ output = (prediction_scores, losses) if losses is not None else (prediction_scores,)
1125
+ output += transformer_outputs[1:]
1126
+ return ((loss,) + output) if loss is not None else output
1127
+ else:
1128
+ output = (prediction_scores, *transformer_outputs[1:])
1129
+ output = ((losses,) + output) if losses is not None else output
1130
+ return (output + (loss,)) if loss is not None else output
1131
+
1132
+ return TransfoXLLMHeadModelOutput(
1133
+ loss=loss,
1134
+ prediction_scores=prediction_scores,
1135
+ losses=losses,
1136
+ mems=transformer_outputs.mems,
1137
+ hidden_states=transformer_outputs.hidden_states,
1138
+ attentions=transformer_outputs.attentions,
1139
+ )
1140
+
1141
+ def get_output_embeddings(self):
1142
+ """Double-check if you are using adaptive softmax."""
1143
+ if self.sample_softmax > 0:
1144
+ return self.out_layer
1145
+ else:
1146
+ return self.crit.out_layers[-1]
1147
+
1148
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
1149
+ inputs = {}
1150
+
1151
+ # if past is defined in model kwargs then use it for faster decoding
1152
+ if past_key_values:
1153
+ inputs["mems"] = past_key_values
1154
+ inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1)
1155
+ else:
1156
+ inputs["input_ids"] = input_ids
1157
+
1158
+ return inputs
1159
+
1160
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
1161
+ new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer)
1162
+
1163
+ self.crit.cutoffs = new_cutoffs
1164
+ self.crit.cutoff_ends = [0] + new_cutoffs
1165
+ self.crit.n_token = new_num_tokens
1166
+
1167
+ @staticmethod
1168
+ def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
1169
+ """
1170
+ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
1171
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
1172
+ generation step.
1173
+ """
1174
+ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
1175
+
1176
+
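A hedged sketch of the labels path above on the published checkpoint (ids are placeholders): the model shifts the labels internally, returns per-token losses of shape `(batch, seq_len - 1)`, and the reduced `loss` averages the non-ignored positions.

```python
# Sketch: compute the language-modeling loss by passing labels=input_ids.
import torch
from transformers import TransfoXLLMHeadModel

model = TransfoXLLMHeadModel.from_pretrained("transfo-xl/transfo-xl-wt103")

input_ids = torch.tensor([[14049, 2, 617, 3225, 29]])   # placeholder ids below vocab_size
outputs = model(input_ids, labels=input_ids)

print(outputs.loss)           # scalar mean negative log likelihood
print(outputs.losses.shape)   # torch.Size([1, 4]) -> (batch, seq_len - 1)
# Note: prediction_scores (and hence .logits) is an empty tuple when labels are given.
```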
1177
+ @add_start_docstrings(
1178
+ """
1179
+ The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
1180
+
1181
+ [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
1182
+ models (e.g. GPT-1) do.
1183
+
1184
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1185
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1186
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1187
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1188
+ each row of the batch).
1189
+ """,
1190
+ TRANSFO_XL_START_DOCSTRING,
1191
+ )
1192
+ class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):
1193
+ def __init__(self, config):
1194
+ super().__init__(config)
1195
+ self.num_labels = config.num_labels
1196
+ self.transformer = TransfoXLModel(config)
1197
+ self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)
1198
+ # Initialize weights and apply final processing
1199
+ self.post_init()
1200
+
1201
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
1202
+ @add_code_sample_docstrings(
1203
+ checkpoint=_CHECKPOINT_FOR_DOC,
1204
+ output_type=TransfoXLSequenceClassifierOutputWithPast,
1205
+ config_class=_CONFIG_FOR_DOC,
1206
+ )
1207
+ def forward(
1208
+ self,
1209
+ input_ids: Optional[torch.LongTensor] = None,
1210
+ mems: Optional[List[torch.FloatTensor]] = None,
1211
+ head_mask: Optional[torch.FloatTensor] = None,
1212
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1213
+ labels: Optional[torch.LongTensor] = None,
1214
+ output_attentions: Optional[bool] = None,
1215
+ output_hidden_states: Optional[bool] = None,
1216
+ return_dict: Optional[bool] = None,
1217
+ ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:
1218
+ r"""
1219
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1220
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1221
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1222
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1223
+ """
1224
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1225
+
1226
+ transformer_outputs = self.transformer(
1227
+ input_ids,
1228
+ mems=mems,
1229
+ head_mask=head_mask,
1230
+ inputs_embeds=inputs_embeds,
1231
+ output_attentions=output_attentions,
1232
+ output_hidden_states=output_hidden_states,
1233
+ return_dict=return_dict,
1234
+ )
1235
+ hidden_states = transformer_outputs[0]
1236
+ logits = self.score(hidden_states)
1237
+
1238
+ if input_ids is not None:
1239
+ batch_size, sequence_length = input_ids.shape[:2]
1240
+ else:
1241
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1242
+
1243
+ assert (
1244
+ self.config.pad_token_id is not None or batch_size == 1
1245
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1246
+ if self.config.pad_token_id is None:
1247
+ sequence_lengths = -1
1248
+ else:
1249
+ if input_ids is not None:
1250
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1251
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1252
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1253
+ sequence_lengths = sequence_lengths.to(logits.device)
1254
+ else:
1255
+ sequence_lengths = -1
1256
+ logger.warning(
1257
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1258
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1259
+ )
1260
+
1261
+ pooled_logits = logits[range(batch_size), sequence_lengths]
1262
+
1263
+ loss = None
1264
+ if labels is not None:
1265
+ if self.config.problem_type is None:
1266
+ if self.num_labels == 1:
1267
+ self.config.problem_type = "regression"
1268
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1269
+ self.config.problem_type = "single_label_classification"
1270
+ else:
1271
+ self.config.problem_type = "multi_label_classification"
1272
+
1273
+ if self.config.problem_type == "regression":
1274
+ loss_fct = MSELoss()
1275
+ if self.num_labels == 1:
1276
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1277
+ else:
1278
+ loss = loss_fct(pooled_logits, labels)
1279
+ elif self.config.problem_type == "single_label_classification":
1280
+ loss_fct = CrossEntropyLoss()
1281
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1282
+ elif self.config.problem_type == "multi_label_classification":
1283
+ loss_fct = BCEWithLogitsLoss()
1284
+ loss = loss_fct(pooled_logits, labels)
1285
+ if not return_dict:
1286
+ output = (pooled_logits,) + transformer_outputs[1:]
1287
+ return ((loss,) + output) if loss is not None else output
1288
+
1289
+ return TransfoXLSequenceClassifierOutputWithPast(
1290
+ loss=loss,
1291
+ logits=pooled_logits,
1292
+ mems=transformer_outputs.mems,
1293
+ hidden_states=transformer_outputs.hidden_states,
1294
+ attentions=transformer_outputs.attentions,
1295
+ )
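Finally, a hedged sketch of the last-token pooling described in the docstring above; `num_labels=2` and the ids are placeholders, and the freshly added classification head is randomly initialized.

```python
# Sketch: batch size 1, so the missing pad_token_id simply falls back to the last token.
import torch
from transformers import TransfoXLForSequenceClassification

model = TransfoXLForSequenceClassification.from_pretrained(
    "transfo-xl/transfo-xl-wt103", num_labels=2
)

input_ids = torch.tensor([[14049, 2, 617, 3225]])
outputs = model(input_ids, labels=torch.tensor([1]))

print(outputs.logits.shape)   # torch.Size([1, 2]) -- scores of the last token per label
print(outputs.loss)           # cross-entropy against the provided label
```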
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py ADDED
@@ -0,0 +1,252 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Utilities for PyTorch Transformer XL model. Directly adapted from https://github.com/kimiyoung/transformer-xl.
18
+ """
19
+
20
+
21
+ import torch
22
+ from torch import nn
23
+
24
+
25
+ # CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
26
+ # CUDA_MINOR = int(torch.version.cuda.split('.')[1])
27
+
28
+
29
+ class ProjectedAdaptiveLogSoftmax(nn.Module):
30
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
31
+ super().__init__()
32
+
33
+ self.n_token = n_token
34
+ self.d_embed = d_embed
35
+ self.d_proj = d_proj
36
+
37
+ self.cutoffs = cutoffs + [n_token]
38
+ self.cutoff_ends = [0] + self.cutoffs
39
+ self.div_val = div_val
40
+
41
+ self.shortlist_size = self.cutoffs[0]
42
+ self.n_clusters = len(self.cutoffs) - 1
43
+ self.head_size = self.shortlist_size + self.n_clusters
44
+
45
+ if self.n_clusters > 0:
46
+ self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
47
+ self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
48
+
49
+ self.out_layers = nn.ModuleList()
50
+ self.out_projs = nn.ParameterList()
51
+
52
+ if div_val == 1:
53
+ for i in range(len(self.cutoffs)):
54
+ if d_proj != d_embed:
55
+ self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
56
+ else:
57
+ self.out_projs.append(None)
58
+
59
+ self.out_layers.append(nn.Linear(d_embed, n_token))
60
+ else:
61
+ for i in range(len(self.cutoffs)):
62
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
63
+ d_emb_i = d_embed // (div_val**i)
64
+
65
+ self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
66
+
67
+ self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
68
+
69
+ self.keep_order = keep_order
70
+
71
+ def _compute_logit(self, hidden, weight, bias, proj):
72
+ if proj is None:
73
+ logit = nn.functional.linear(hidden, weight, bias=bias)
74
+ else:
75
+ # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
76
+ proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
77
+ logit = nn.functional.linear(proj_hid, weight, bias=bias)
78
+ # else:
79
+ # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
80
+ # if bias is not None:
81
+ # logit = logit + bias
82
+
83
+ return logit
84
+
85
+ def forward(self, hidden, labels=None, keep_order=False):
86
+ """
87
+ Params:
88
+ hidden :: [len*bsz x d_proj]
89
+ labels :: [len*bsz]
90
+
91
+ Return:
92
+ if labels is None: out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
93
+ else: out :: [(len-1)*bsz] negative log likelihood. We could replace this implementation with the
94
+ native PyTorch one if it allowed setting the bias on all clusters. See:
95
+ https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
96
+ """
97
+
98
+ if labels is not None:
99
+ # Shift so that tokens < n predict n
100
+ hidden = hidden[..., :-1, :].contiguous()
101
+ labels = labels[..., 1:].contiguous()
102
+ hidden = hidden.view(-1, hidden.size(-1))
103
+ labels = labels.view(-1)
104
+ if hidden.size(0) != labels.size(0):
105
+ raise RuntimeError("Input and labels should have the same size in the batch dimension.")
106
+ else:
107
+ hidden = hidden.view(-1, hidden.size(-1))
108
+
109
+ if self.n_clusters == 0:
110
+ logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
111
+ if labels is not None:
112
+ mask = labels != -100
113
+ out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
114
+ out[mask] = (
115
+ -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
116
+ )
117
+ else:
118
+ out = nn.functional.log_softmax(logit, dim=-1)
119
+ else:
120
+ # construct weights and biases
121
+ weights, biases = [], []
122
+ for i in range(len(self.cutoffs)):
123
+ if self.div_val == 1:
124
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
125
+ weight_i = self.out_layers[0].weight[l_idx:r_idx]
126
+ bias_i = self.out_layers[0].bias[l_idx:r_idx]
127
+ else:
128
+ weight_i = self.out_layers[i].weight
129
+ bias_i = self.out_layers[i].bias
130
+
131
+ if i == 0:
132
+ weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
133
+ bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
134
+
135
+ weights.append(weight_i)
136
+ biases.append(bias_i)
137
+
138
+ head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
139
+
140
+ head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
141
+ head_logprob = nn.functional.log_softmax(head_logit, dim=1)
142
+
143
+ if labels is None:
144
+ out = hidden.new_empty((head_logit.size(0), self.n_token))
145
+ else:
146
+ out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
147
+
148
+ offset = 0
149
+ cutoff_values = [0] + self.cutoffs
150
+ for i in range(len(cutoff_values) - 1):
151
+ l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
152
+
153
+ if labels is not None:
154
+ mask_i = (labels >= l_idx) & (labels < r_idx)
155
+ indices_i = mask_i.nonzero().squeeze()
156
+
157
+ if indices_i.numel() == 0:
158
+ continue
159
+
160
+ target_i = labels.index_select(0, indices_i) - l_idx
161
+ head_logprob_i = head_logprob.index_select(0, indices_i)
162
+ hidden_i = hidden.index_select(0, indices_i)
163
+ else:
164
+ hidden_i = hidden
165
+
166
+ if i == 0:
167
+ if labels is not None:
168
+ logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
169
+ else:
170
+ out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
171
+ else:
172
+ weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
173
+
174
+ tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
175
+ tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
176
+ cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
177
+ if labels is not None:
178
+ logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
179
+ 1, target_i[:, None]
180
+ ).squeeze(1)
181
+ else:
182
+ logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
183
+ out[:, l_idx:r_idx] = logprob_i
184
+
185
+ if labels is not None:
186
+ if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
187
+ out.index_copy_(0, indices_i, -logprob_i)
188
+ else:
189
+ out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
190
+ offset += logprob_i.size(0)
191
+
192
+ return out
193
+
194
+ def log_prob(self, hidden):
195
+ r"""
196
+ Computes log probabilities for all \\(n\_classes\\). From:
197
+ https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
198
+
199
+ Args:
200
+ hidden (Tensor): a minibatch of examples
201
+
202
+ Returns:
203
+ log-probabilities for each class \\(c\\) in range \\(0 <= c <= n\_classes\\), where \\(n\_classes\\) is
204
+ a parameter passed to `AdaptiveLogSoftmaxWithLoss` constructor. Shape:
205
+
206
+ - Input: \\((N, in\_features)\\)
207
+ - Output: \\((N, n\_classes)\\)
208
+ """
209
+ if self.n_clusters == 0:
210
+ logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
211
+ return nn.functional.log_softmax(logit, dim=-1)
212
+ else:
213
+ # construct weights and biases
214
+ weights, biases = [], []
215
+ for i in range(len(self.cutoffs)):
216
+ if self.div_val == 1:
217
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
218
+ weight_i = self.out_layers[0].weight[l_idx:r_idx]
219
+ bias_i = self.out_layers[0].bias[l_idx:r_idx]
220
+ else:
221
+ weight_i = self.out_layers[i].weight
222
+ bias_i = self.out_layers[i].bias
223
+
224
+ if i == 0:
225
+ weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
226
+ bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
227
+
228
+ weights.append(weight_i)
229
+ biases.append(bias_i)
230
+
231
+ head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
232
+ head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
233
+
234
+ out = hidden.new_empty((head_logit.size(0), self.n_token))
235
+ head_logprob = nn.functional.log_softmax(head_logit, dim=1)
236
+
237
+ cutoff_values = [0] + self.cutoffs
238
+ for i in range(len(cutoff_values) - 1):
239
+ start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
240
+
241
+ if i == 0:
242
+ out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
243
+ else:
244
+ weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
245
+
246
+ tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
247
+ tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
248
+
249
+ logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
250
+ out[:, start_idx:stop_idx] = logprob_i
251
+
252
+ return out
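To see both code paths of `forward` above in isolation, a standalone sketch with toy sizes (all numbers are placeholders and the parameters are freshly initialized, so only the shapes are meaningful):

```python
# Sketch only: toy cutoffs, toy hidden size, hand-picked labels so every cluster is hit.
import torch

crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=2)

hidden = torch.randn(2, 5, 32)   # (batch, seq_len, d_proj)
labels = torch.tensor([[10, 20, 150, 700, 800],
                       [30, 40, 160, 710, 810]])

nll = crit(hidden, labels)             # labels are shifted inside, like the LM head
print(nll.shape)                       # torch.Size([8]) -> batch * (seq_len - 1)
log_probs = crit(hidden.view(-1, 32))  # no labels: full distribution per position
print(log_probs.shape)                 # torch.Size([10, 1000])
```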
venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py ADDED
@@ -0,0 +1,819 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Tokenization classes for Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl.
18
+ """
19
+
20
+
21
+ import glob
22
+ import os
23
+ import pickle
24
+ import re
25
+ from collections import Counter, OrderedDict
26
+ from typing import List, Optional, Tuple
27
+
28
+ import numpy as np
29
+
30
+ from ....tokenization_utils import PreTrainedTokenizer
31
+ from ....utils import (
32
+ cached_file,
33
+ is_sacremoses_available,
34
+ is_torch_available,
35
+ logging,
36
+ requires_backends,
37
+ strtobool,
38
+ torch_only_method,
39
+ )
40
+
41
+
42
+ if is_sacremoses_available():
43
+ import sacremoses as sm
44
+
45
+
46
+ if is_torch_available():
47
+ import torch
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ VOCAB_FILES_NAMES = {
53
+ "pretrained_vocab_file": "vocab.pkl",
54
+ "pretrained_vocab_file_torch": "vocab.bin",
55
+ "vocab_file": "vocab.txt",
56
+ }
57
+
58
+
59
+ PRETRAINED_CORPUS_ARCHIVE_MAP = {
60
+ "transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/corpus.bin",
61
+ }
62
+ CORPUS_NAME = "corpus.bin"
63
+
64
+ MATCH_NUMBERS = r"(?<=\d)[,.](?=\d)", r" @\g<0>@ "
65
+ DETOKENIZE_NUMBERS = [(r" @\,@ ", r","), (r" @\.@ ", r".")]
66
+
67
+
68
+ def tokenize_numbers(text_array: List[str]) -> List[str]:
69
+ """
70
+ Splits large comma-separated numbers and floating point values. This is done by replacing commas with ' @,@ ' and
71
+ dots with ' @.@ '.
72
+
73
+ Args:
74
+ text_array: An already tokenized text as list.
75
+
76
+ Returns:
77
+ A list of strings with tokenized numbers.
78
+
79
+ Example:
80
+
81
+ ```python
82
+ >>> tokenize_numbers(["$", "5,000", "1.73", "m"])
83
+ ['$', '5', '@,@', '000', '1', '@.@', '73', 'm']
84
+ ```"""
85
+ tokenized = []
86
+ for i in range(len(text_array)):
87
+ reg, sub = MATCH_NUMBERS
88
+ replaced = re.sub(reg, sub, text_array[i]).split()
89
+ tokenized.extend(replaced)
90
+
91
+ return tokenized
92
+
93
+
94
+ def detokenize_numbers(text: str) -> str:
95
+ """
96
+ Inverts the operation of *tokenize_numbers*, replacing ' @,@ ' and ' @.@ ' with ',' and '.'.
97
+
98
+ Args:
99
+ text: A string where the number should be detokenized.
100
+
101
+ Returns:
102
+ A detokenized string.
103
+
104
+ Example:
105
+
106
+ ```python
107
+ >>> detokenize_numbers("$ 5 @,@ 000 1 @.@ 73 m")
108
+ '$ 5,000 1.73 m'
109
+ ```"""
110
+ for reg, sub in DETOKENIZE_NUMBERS:
111
+ text = re.sub(reg, sub, text)
112
+ return text
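A quick round-trip check of the two helpers above (outputs match their doctests):

```python
tokens = tokenize_numbers(["$", "5,000", "1.73", "m"])
# ['$', '5', '@,@', '000', '1', '@.@', '73', 'm']
assert detokenize_numbers(" ".join(tokens)) == "$ 5,000 1.73 m"
```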
113
+
114
+
115
+ class TransfoXLTokenizer(PreTrainedTokenizer):
116
+ """
117
+ Construct a Transformer-XL tokenizer adapted from Vocab class in [the original
118
+ code](https://github.com/kimiyoung/transformer-xl). The Transformer-XL tokenizer is a word-level tokenizer (no
119
+ sub-word tokenization).
120
+
121
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
122
+ this superclass for more information regarding those methods.
123
+
124
+ Args:
125
+ special (`List[str]`, *optional*):
126
+ A list of special tokens (to be treated by the original implementation of this tokenizer).
127
+ min_freq (`int`, *optional*, defaults to 0):
128
+ The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it
129
+ will be mapped to `unk_token`).
130
+ max_size (`int`, *optional*):
131
+ The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found
132
+ after excluding the tokens according to the `min_freq` rule.
133
+ lower_case (`bool`, *optional*, defaults to `False`):
134
+ Whether or not to lowercase the input when tokenizing.
135
+ delimiter (`str`, *optional*):
136
+ The delimiter used between tokens.
137
+ vocab_file (`str`, *optional*):
138
+ File containing the vocabulary (from the original implementation).
139
+ pretrained_vocab_file (`str`, *optional*):
140
+ File containing the vocabulary as saved with the `save_pretrained()` method.
141
+ never_split (`List[str]`, *optional*):
142
+ List of tokens that should never be split. If no list is specified, will simply use the existing special
143
+ tokens.
144
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
145
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
146
+ token instead.
147
+ eos_token (`str`, *optional*, defaults to `"<eos>"`):
148
+ The end of sequence token.
149
+ additional_special_tokens (`List[str]`, *optional*, defaults to `['<formula>']`):
150
+ A list of additional special tokens (for the HuggingFace functionality).
151
+ language (`str`, *optional*, defaults to `"en"`):
152
+ The language of this tokenizer (used for Moses preprocessing).
153
+ """
154
+
155
+ vocab_files_names = VOCAB_FILES_NAMES
156
+ model_input_names = ["input_ids"]
157
+
158
+ def __init__(
159
+ self,
160
+ special=None,
161
+ min_freq=0,
162
+ max_size=None,
163
+ lower_case=False,
164
+ delimiter=None,
165
+ vocab_file=None,
166
+ pretrained_vocab_file: str = None,
167
+ never_split=None,
168
+ unk_token="<unk>",
169
+ eos_token="<eos>",
170
+ additional_special_tokens=["<formula>"],
171
+ language="en",
172
+ **kwargs,
173
+ ):
174
+ logger.error(
175
+ "`TransfoXL` was deprecated due to security issues linked to `pickle.load` in `TransfoXLTokenizer`. "
176
+ "See more details on this model's documentation page: "
177
+ "`https://github.com/huggingface/transformers/blob/main/docs/source/en/model_doc/transfo-xl.md`."
178
+ )
179
+
180
+ requires_backends(self, "sacremoses")
181
+ if special is None:
182
+ special = []
183
+ self.counter = Counter()
184
+ self.special = special
185
+ self.min_freq = min_freq
186
+ self.max_size = max_size
187
+ self.lower_case = lower_case
188
+ self.delimiter = delimiter
189
+ self.vocab_file = vocab_file
190
+ self.punctuation_symbols = '!"#$%&()*+,-./\\:;<=>?@[\\]^_`{|}~'
191
+ self.punction_without_space_before_pattern = re.compile(rf"[^\s][{self.punctuation_symbols}]")
192
+ self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern()
193
+ self.language = language
194
+ self.moses_punct_normalizer = sm.MosesPunctNormalizer(language)
195
+ self.moses_tokenizer = sm.MosesTokenizer(language)
196
+ self.moses_detokenizer = sm.MosesDetokenizer(language)
197
+ self.idx2sym = []
198
+ self.sym2idx = OrderedDict()
199
+ # This try... catch... is not beautiful but honestly this tokenizer was not made to be used
200
+ # in a library like ours, at all.
201
+ try:
202
+ vocab_dict = None
203
+ if pretrained_vocab_file is not None:
204
+ # Priority on pickle files (support PyTorch and TF)
205
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
206
+ raise ValueError(
207
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is "
208
+ "potentially malicious. It's recommended to never unpickle data that could have come from an "
209
+ "untrusted source, or that could have been tampered with. If you already verified the pickle "
210
+ "data and decided to use it, you can set the environment variable "
211
+ "`TRUST_REMOTE_CODE` to `True` to allow it."
212
+ )
213
+ with open(pretrained_vocab_file, "rb") as f:
214
+ vocab_dict = pickle.load(f)
215
+
216
+ # Loading a torch-saved transfo-xl vocab dict with pickle results in an integer
217
+ # Entering this if statement means that we tried to load a torch-saved file with pickle, and we failed.
218
+ # We therefore load it with torch, if it's available.
219
+ if isinstance(vocab_dict, int):
220
+ if not is_torch_available():
221
+ raise ImportError(
222
+ "Not trying to load dict with PyTorch as you need to install pytorch to load "
223
+ "from a PyTorch pretrained vocabulary, "
224
+ "or activate it with environment variables USE_TORCH=1 and USE_TF=0."
225
+ )
226
+ vocab_dict = torch.load(pretrained_vocab_file)
227
+
228
+ if vocab_dict is not None:
229
+ for key, value in vocab_dict.items():
230
+ if key not in self.__dict__ or key in ["sym2idx", "idx2sym"]:
231
+ self.__dict__[key] = value
232
+ elif vocab_file is not None:
233
+ self.build_vocab()
234
+
235
+ except Exception as e:
236
+ raise ValueError(
237
+ f"Unable to parse file {pretrained_vocab_file}. Unknown format. "
238
+ "If you tried to load a model saved through TransfoXLTokenizerFast, "
239
+ "please note they are not compatible."
240
+ ) from e
241
+
242
+ if vocab_file is not None:
243
+ self.build_vocab()
244
+
245
+ super().__init__(
246
+ special=special,
247
+ min_freq=min_freq,
248
+ max_size=max_size,
249
+ lower_case=lower_case,
250
+ delimiter=delimiter,
251
+ vocab_file=vocab_file,
252
+ pretrained_vocab_file=pretrained_vocab_file,
253
+ never_split=never_split,
254
+ unk_token=unk_token,
255
+ eos_token=eos_token,
256
+ additional_special_tokens=additional_special_tokens,
257
+ language=language,
258
+ **kwargs,
259
+ )
260
+
261
+ # these are not required to initialize the parent class as they are only used when tokenizing.
262
+ if never_split is None:
263
+ never_split = self.all_special_tokens
264
+ self.never_split = never_split
265
+
266
+ @property
267
+ def do_lower_case(self):
268
+ return self.lower_case
269
+
270
+ def _compile_space_around_punctuation_pattern(self):
271
+ look_ahead_for_special_token = f"(?=[{self.punctuation_symbols}])"
272
+ look_ahead_to_match_all_except_space = r"(?=[^\s])"
273
+ return re.compile(r"" + look_ahead_for_special_token + look_ahead_to_match_all_except_space)
274
+
275
+ def count_file(self, path, verbose=False, add_eos=False):
276
+ if verbose:
277
+ logger.info(f"counting file {path} ...")
278
+ assert os.path.exists(path), f"Input file {path} not found"
279
+
280
+ sents = []
281
+ with open(path, "r", encoding="utf-8") as f:
282
+ for idx, line in enumerate(f):
283
+ if verbose and idx > 0 and idx % 500000 == 0:
284
+ logger.info(f" line {idx}")
285
+ symbols = self.tokenize(line, add_eos=add_eos)
286
+ self.counter.update(symbols)
287
+ sents.append(symbols)
288
+
289
+ return sents
290
+
291
+ def count_sents(self, sents, verbose=False):
292
+ """
293
+ sents : a list of sentences, each a list of tokenized symbols
294
+ """
295
+ if verbose:
296
+ logger.info(f"counting {len(sents)} sents ...")
297
+ for idx, symbols in enumerate(sents):
298
+ if verbose and idx > 0 and idx % 500000 == 0:
299
+ logger.info(f" line {idx}")
300
+ self.counter.update(symbols)
301
+
302
+ def _build_from_file(self, vocab_file):
303
+ self.idx2sym = []
304
+ self.sym2idx = OrderedDict()
305
+
306
+ with open(vocab_file, "r", encoding="utf-8") as f:
307
+ for line in f:
308
+ symb = line.strip().split()[0]
309
+ self.add_symbol(symb)
310
+ if "<UNK>" in self.sym2idx:
311
+ self.unk_idx = self.sym2idx["<UNK>"]
312
+ elif "<unk>" in self.sym2idx:
313
+ self.unk_idx = self.sym2idx["<unk>"]
314
+ else:
315
+ raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement.")
316
+
317
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
318
+ if os.path.isdir(save_directory):
319
+ vocab_file = os.path.join(
320
+ save_directory,
321
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["pretrained_vocab_file"],
322
+ )
323
+ else:
324
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
325
+ with open(vocab_file, "wb") as f:
326
+ pickle.dump(self.__dict__, f)
327
+ return (vocab_file,)
328
+
329
+ def build_vocab(self):
330
+ if self.vocab_file:
331
+ logger.info(f"building vocab from {self.vocab_file}")
332
+ self._build_from_file(self.vocab_file)
333
+ logger.info(f"Final vocab size {len(self.sym2idx)}")
334
+ else:
335
+ logger.info(f"building vocab with min_freq={self.min_freq}, max_size={self.max_size}")
336
+ self.idx2sym = []
337
+ self.sym2idx = OrderedDict()
338
+
339
+ for sym in self.special:
340
+ self.add_special(sym)
341
+
342
+ for sym, cnt in self.counter.most_common(self.max_size):
343
+ if cnt < self.min_freq:
344
+ break
345
+ self.add_symbol(sym)
346
+
347
+ logger.info(f"Final vocab size {len(self.sym2idx)} from {len(self.counter)} unique tokens")
348
+
349
+ @torch_only_method
350
+ def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False):
351
+ if verbose:
352
+ logger.info(f"encoding file {path} ...")
353
+ assert os.path.exists(path), f"Output file {path} not found"
354
+ encoded = []
355
+ with open(path, "r", encoding="utf-8") as f:
356
+ for idx, line in enumerate(f):
357
+ if verbose and idx > 0 and idx % 500000 == 0:
358
+ logger.info(f" line {idx}")
359
+ symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos)
360
+ encoded.append(self.convert_to_tensor(symbols))
361
+
362
+ if ordered:
363
+ encoded = torch.cat(encoded)
364
+
365
+ return encoded
366
+
367
+ @torch_only_method
368
+ def encode_sents(self, sents, ordered=False, verbose=False):
369
+ if verbose:
370
+ logger.info(f"encoding {len(sents)} sents ...")
371
+ encoded = []
372
+ for idx, symbols in enumerate(sents):
373
+ if verbose and idx > 0 and idx % 500000 == 0:
374
+ logger.info(f" line {idx}")
375
+ encoded.append(self.convert_to_tensor(symbols))
376
+
377
+ if ordered:
378
+ encoded = torch.cat(encoded)
379
+
380
+ return encoded
381
+
382
+ def add_special(self, sym):
383
+ if sym not in self.sym2idx:
384
+ self.idx2sym.append(sym)
385
+ self.sym2idx[sym] = len(self.idx2sym) - 1
386
+ setattr(self, f"{sym.strip('<>')}_idx", self.sym2idx[sym])
387
+
388
+ def add_symbol(self, sym):
389
+ if sym not in self.sym2idx:
390
+ self.idx2sym.append(sym)
391
+ self.sym2idx[sym] = len(self.idx2sym) - 1
392
+
393
+ def move_added_token(self, token: str, target_idx: int):
394
+ """
395
+ Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding
396
+ layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the
397
+ default position (at the very end) to the desired one.
398
+
399
+ Args:
400
+ token: The token to move to a specific position in the vocab.
401
+ target_idx: The position where the token should be moved to.
402
+ """
403
+ assert token in self.added_tokens_encoder, "Token which should be moved has to be an added token"
404
+ assert token not in self.idx2sym, "Token which should be moved is already in vocab"
405
+
406
+ # Insert sym into vocab
407
+ self.idx2sym.insert(target_idx, token)
408
+ self.sym2idx[token] = target_idx
409
+
410
+ # Shift following indices in sym2idx
411
+ for idx in range(target_idx + 1, len(self.idx2sym)):
412
+ current_sym = self.idx2sym[idx]
413
+ self.sym2idx[current_sym] = idx
414
+
415
+ # Delete token from added_tokens
416
+ old_index = self._added_tokens_encoder.pop(token)
417
+ self._added_tokens_decoder.pop(old_index)
418
+
419
+ def moses_punct_norm(self, text):
420
+ return self.moses_punct_normalizer.normalize(text)
421
+
422
+ def moses_tokenize(self, text):
423
+ return self.moses_tokenizer.tokenize(
424
+ text, aggressive_dash_splits=True, return_str=False, escape=False, protected_patterns=self.never_split
425
+ )
426
+
427
+ def moses_pipeline(self, text: str) -> List[str]:
428
+ """
429
+ Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with
430
+ *aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large
431
+ comma-separated numbers and floating point values are split. E.g. "23,000 people are 1.80m tall" -> "23 @,@ 000
432
+ people are 1 @.@ 80m tall"
433
+
434
+ Args:
435
+ text: Text to be tokenized
436
+
437
+ Returns:
438
+ A list of tokenized strings
439
+
440
+ Example:
441
+
442
+ ```python
443
+ >>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
444
+ >>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall")
445
+ ['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']
446
+ ```"""
447
+ text = self.moses_punct_norm(text)
448
+ text = self.moses_tokenize(text)
449
+ text = tokenize_numbers(text)
450
+ return text
451
+
452
+ def _convert_id_to_token(self, idx):
453
+ """Converts an id in a token (BPE) using the vocab."""
454
+ assert 0 <= idx < len(self), f"Index {idx} out of vocabulary range"
455
+ return self.idx2sym[idx]
456
+
457
+ def _convert_token_to_id(self, sym):
458
+ """Converts a token (str) in an id using the vocab."""
459
+ if sym in self.sym2idx:
460
+ return self.sym2idx[sym]
461
+ else:
462
+ # logger.info(f'encounter unk {sym}')
463
+ # assert '<eos>' not in sym
464
+ if hasattr(self, "unk_idx"):
465
+ return self.sym2idx.get(sym, self.unk_idx)
466
+ # Backward compatibility with pre-trained models
467
+ elif "<unk>" in self.sym2idx:
468
+ return self.sym2idx["<unk>"]
469
+ elif "<UNK>" in self.sym2idx:
470
+ return self.sym2idx["<UNK>"]
471
+ else:
472
+ raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement.")
473
+
474
+ def convert_tokens_to_string(self, tokens):
475
+ """
476
+ Converts a sequence of tokens (string) into a single string. Additionally, the split numbers are converted back
477
+ into their original form.
478
+ """
479
+ out_string = self.moses_detokenizer.detokenize(tokens)
480
+ return detokenize_numbers(out_string).strip()
481
+
482
+ @torch_only_method
483
+ def convert_to_tensor(self, symbols):
484
+ return torch.LongTensor(self.convert_tokens_to_ids(symbols))
485
+
486
+ @property
487
+ def vocab_size(self):
488
+ return len(self.idx2sym)
489
+
490
+ def get_vocab(self):
491
+ vocab = self.sym2idx.copy()
492
+ vocab.update(self.added_tokens_encoder)
493
+ return vocab
494
+
495
+ def _tokenize(self, line, add_eos=False, add_double_eos=False):
496
+ line = line.strip()
497
+ # convert to lower case
498
+ if self.lower_case:
499
+ line = line.lower()
500
+
501
+ # empty delimiter '' will evaluate False
502
+ if self.delimiter == "":
503
+ symbols = line
504
+ else:
505
+ symbols = self.moses_pipeline(line)
506
+
507
+ if add_double_eos: # lm1b
508
+ return ["<S>"] + symbols + ["<S>"]
509
+ elif add_eos:
510
+ return symbols + ["<eos>"]
511
+ else:
512
+ return symbols
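For orientation, a hedged usage sketch of the tokenizer defined above. It assumes sacremoses is installed, the wt103 checkpoint is reachable, and, because the pretrained vocab is a pickle, it opts in to the `TRUST_REMOTE_CODE` switch checked in `__init__`; treat it as an illustration rather than a recommendation:

```python
import os

os.environ["TRUST_REMOTE_CODE"] = "True"  # opt in to the pickle.load path shown above

from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
tokens = tokenizer.tokenize("The price rose to $ 5,000 .")  # word-level moses pipeline + number splitting
print(tokens)
print(tokenizer.convert_tokens_to_string(tokens))
```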
513
+
514
+
515
+ class LMOrderedIterator(object):
516
+ def __init__(self, data, bsz, bptt, device="cpu", ext_len=None):
517
+ """
518
+ data -- LongTensor -- the LongTensor is strictly ordered
519
+ """
520
+ self.bsz = bsz
521
+ self.bptt = bptt
522
+ self.ext_len = ext_len if ext_len is not None else 0
523
+
524
+ self.device = device
525
+
526
+ # Work out how cleanly we can divide the dataset into bsz parts.
527
+ self.n_step = data.size(0) // bsz
528
+
529
+ # Trim off any extra elements that wouldn't cleanly fit (remainders).
530
+ data = data.narrow(0, 0, self.n_step * bsz)
531
+
532
+ # Evenly divide the data across the bsz batches.
533
+ self.data = data.view(bsz, -1).t().contiguous().to(device)
534
+
535
+ # Number of mini-batches
536
+ self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
537
+
538
+ def get_batch(self, i, bptt=None):
539
+ if bptt is None:
540
+ bptt = self.bptt
541
+ seq_len = min(bptt, self.data.size(0) - 1 - i)
542
+
543
+ end_idx = i + seq_len
544
+ beg_idx = max(0, i - self.ext_len)
545
+
546
+ data = self.data[beg_idx:end_idx]
547
+ target = self.data[i + 1 : i + 1 + seq_len]
548
+
549
+ data_out = data.transpose(0, 1).contiguous().to(self.device)
550
+ target_out = target.transpose(0, 1).contiguous().to(self.device)
551
+
552
+ return data_out, target_out, seq_len
553
+
554
+ def get_fixlen_iter(self, start=0):
555
+ for i in range(start, self.data.size(0) - 1, self.bptt):
556
+ yield self.get_batch(i)
557
+
558
+ def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
559
+ max_len = self.bptt + max_deviation * std
560
+ i = start
561
+ while True:
562
+ bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
563
+ bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
564
+ data, target, seq_len = self.get_batch(i, bptt)
565
+ i += seq_len
566
+ yield data, target, seq_len
567
+ if i >= self.data.size(0) - 2:
568
+ break
569
+
570
+ def __iter__(self):
571
+ return self.get_fixlen_iter()
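To make the batching above concrete, a small sketch (toy sizes, mirroring `__init__` and `get_batch`) of how a flat token stream becomes `(bsz, seq_len)` input/target pairs:

```python
import torch

data = torch.arange(10)                # toy corpus: tokens 0..9
bsz, bptt = 2, 3
n_step = data.size(0) // bsz           # 5 tokens per stream
data = data.narrow(0, 0, n_step * bsz).view(bsz, -1).t().contiguous()

i = 0
seq_len = min(bptt, data.size(0) - 1 - i)
batch = data[i : i + seq_len].transpose(0, 1)           # inputs,  shape (bsz, seq_len)
target = data[i + 1 : i + 1 + seq_len].transpose(0, 1)  # shifted next-token targets

print(batch.tolist())   # [[0, 1, 2], [5, 6, 7]]
print(target.tolist())  # [[1, 2, 3], [6, 7, 8]]
```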
572
+
573
+
574
+ class LMShuffledIterator(object):
575
+ def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
576
+ """
577
+ data -- list[LongTensor] -- there is no order among the LongTensors
578
+ """
579
+ self.data = data
580
+
581
+ self.bsz = bsz
582
+ self.bptt = bptt
583
+ self.ext_len = ext_len if ext_len is not None else 0
584
+
585
+ self.device = device
586
+ self.shuffle = shuffle
587
+
588
+ def get_sent_stream(self):
589
+ # index iterator
590
+ epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data)))
591
+
592
+ # sentence iterator
593
+ for idx in epoch_indices:
594
+ yield self.data[idx]
595
+
596
+ @torch_only_method
597
+ def stream_iterator(self, sent_stream):
598
+ # streams for each data in the batch
599
+ streams = [None] * self.bsz
600
+
601
+ data = torch.LongTensor(self.bptt, self.bsz)
602
+ target = torch.LongTensor(self.bptt, self.bsz)
603
+
604
+ n_retain = 0
605
+
606
+ while True:
607
+ # data : [n_retain+bptt x bsz]
608
+ # target : [bptt x bsz]
609
+ data[n_retain:].fill_(-1)
610
+ target.fill_(-1)
611
+
612
+ valid_batch = True
613
+
614
+ for i in range(self.bsz):
615
+ n_filled = 0
616
+ try:
617
+ while n_filled < self.bptt:
618
+ if streams[i] is None or len(streams[i]) <= 1:
619
+ streams[i] = next(sent_stream)
620
+ # number of new tokens to fill in
621
+ n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
622
+ # first n_retain tokens are retained from last batch
623
+ data[n_retain + n_filled : n_retain + n_filled + n_new, i] = streams[i][:n_new]
624
+ target[n_filled : n_filled + n_new, i] = streams[i][1 : n_new + 1]
625
+ streams[i] = streams[i][n_new:]
626
+ n_filled += n_new
627
+ except StopIteration:
628
+ valid_batch = False
629
+ break
630
+
631
+ if not valid_batch:
632
+ return
633
+
634
+ data_out = data.transpose(0, 1).contiguous().to(self.device)
635
+ target_out = target.transpose(0, 1).contiguous().to(self.device)
636
+
637
+ yield data_out, target_out, self.bptt
638
+
639
+ n_retain = min(data.size(0), self.ext_len)
640
+ if n_retain > 0:
641
+ data[:n_retain] = data[-n_retain:]
642
+ data.resize_(n_retain + self.bptt, data.size(1))
643
+
644
+ def __iter__(self):
645
+ # sent_stream is an iterator
646
+ sent_stream = self.get_sent_stream()
647
+
648
+ for batch in self.stream_iterator(sent_stream):
649
+ yield batch
650
+
651
+
652
+ class LMMultiFileIterator(LMShuffledIterator):
653
+ def __init__(self, paths, vocab, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
654
+ self.paths = paths
655
+ self.vocab = vocab
656
+
657
+ self.bsz = bsz
658
+ self.bptt = bptt
659
+ self.ext_len = ext_len if ext_len is not None else 0
660
+
661
+ self.device = device
662
+ self.shuffle = shuffle
663
+
664
+ def get_sent_stream(self, path):
665
+ sents = self.vocab.encode_file(path, add_double_eos=True)
666
+ if self.shuffle:
667
+ np.random.shuffle(sents)
668
+ sent_stream = iter(sents)
669
+
670
+ return sent_stream
671
+
672
+ def __iter__(self):
673
+ if self.shuffle:
674
+ np.random.shuffle(self.paths)
675
+
676
+ for path in self.paths:
677
+ # sent_stream is an iterator
678
+ sent_stream = self.get_sent_stream(path)
679
+ for batch in self.stream_iterator(sent_stream):
680
+ yield batch
681
+
682
+
683
+ class TransfoXLCorpus(object):
684
+ @classmethod
685
+ @torch_only_method
686
+ def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
687
+ """
688
+ Instantiate a pre-processed corpus.
689
+ """
690
+ vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
691
+ is_local = os.path.isdir(pretrained_model_name_or_path)
692
+ # redirect to the cache, if necessary
693
+ try:
694
+ resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir)
695
+ except EnvironmentError:
696
+ logger.error(
697
+ f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list"
698
+ f" ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}. We assumed '{pretrained_model_name_or_path}'"
699
+ f" was a path or url but couldn't find files {CORPUS_NAME} at this path or url."
700
+ )
701
+ return None
702
+ if is_local:
703
+ logger.info(f"loading corpus file {resolved_corpus_file}")
704
+ else:
705
+ logger.info(f"loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}")
706
+
707
+ # Instantiate tokenizer.
708
+ corpus = cls(*inputs, **kwargs)
709
+ corpus_dict = torch.load(resolved_corpus_file)
710
+ for key, value in corpus_dict.items():
711
+ corpus.__dict__[key] = value
712
+ corpus.vocab = vocab
713
+ if corpus.train is not None:
714
+ corpus.train = torch.tensor(corpus.train, dtype=torch.long)
715
+ if corpus.valid is not None:
716
+ corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
717
+ if corpus.test is not None:
718
+ corpus.test = torch.tensor(corpus.test, dtype=torch.long)
719
+ return corpus
720
+
721
+ def __init__(self, *args, **kwargs):
722
+ self.vocab = TransfoXLTokenizer(*args, **kwargs)
723
+ self.dataset = None
724
+ self.train = None
725
+ self.valid = None
726
+ self.test = None
727
+
728
+ def build_corpus(self, path, dataset):
729
+ self.dataset = dataset
730
+
731
+ if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
732
+ self.vocab.count_file(os.path.join(path, "train.txt"))
733
+ self.vocab.count_file(os.path.join(path, "valid.txt"))
734
+ self.vocab.count_file(os.path.join(path, "test.txt"))
735
+ elif self.dataset == "wt103":
736
+ self.vocab.count_file(os.path.join(path, "train.txt"))
737
+ elif self.dataset == "lm1b":
738
+ train_path_pattern = os.path.join(
739
+ path,
740
+ "1-billion-word-language-modeling-benchmark-r13output",
741
+ "training-monolingual.tokenized.shuffled",
742
+ "news.en-*",
743
+ )
744
+ train_paths = glob.glob(train_path_pattern)
745
+ # the vocab will load from file when build_vocab() is called
746
+
747
+ self.vocab.build_vocab()
748
+
749
+ if self.dataset in ["ptb", "wt2", "wt103"]:
750
+ self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True)
751
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True)
752
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True)
753
+ elif self.dataset in ["enwik8", "text8"]:
754
+ self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True, add_eos=False)
755
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
756
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True, add_eos=False)
757
+ elif self.dataset == "lm1b":
758
+ self.train = train_paths
759
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True)
760
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=False, add_double_eos=True)
761
+
762
+ def get_iterator(self, split, *args, **kwargs):
763
+ if split == "train":
764
+ if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
765
+ data_iter = LMOrderedIterator(self.train, *args, **kwargs)
766
+ elif self.dataset == "lm1b":
767
+ kwargs["shuffle"] = True
768
+ data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
769
+ elif split in ["valid", "test"]:
770
+ data = self.valid if split == "valid" else self.test
771
+ if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
772
+ data_iter = LMOrderedIterator(data, *args, **kwargs)
773
+ elif self.dataset == "lm1b":
774
+ data_iter = LMShuffledIterator(data, *args, **kwargs)
775
+ else:
776
+ data_iter = None
777
+ raise ValueError(f"Split not recognized: {split}")
778
+
779
+ return data_iter
780
+
781
+
782
+ @torch_only_method
783
+ def get_lm_corpus(datadir, dataset):
784
+ fn = os.path.join(datadir, "cache.pt")
785
+ fn_pickle = os.path.join(datadir, "cache.pkl")
786
+ if os.path.exists(fn):
787
+ logger.info("Loading cached dataset...")
788
+ corpus = torch.load(fn)
789
+ elif os.path.exists(fn_pickle):
790
+ logger.info("Loading cached dataset from pickle...")
791
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
792
+ raise ValueError(
793
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
794
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
795
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
796
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
797
+ )
798
+ with open(fn, "rb") as fp:
799
+ corpus = pickle.load(fp)
800
+ else:
801
+ logger.info(f"Producing dataset {dataset}...")
802
+ kwargs = {}
803
+ if dataset in ["wt103", "wt2"]:
804
+ kwargs["special"] = ["<eos>"]
805
+ kwargs["lower_case"] = False
806
+ elif dataset == "ptb":
807
+ kwargs["special"] = ["<eos>"]
808
+ kwargs["lower_case"] = True
809
+ elif dataset == "lm1b":
810
+ kwargs["special"] = []
811
+ kwargs["lower_case"] = False
812
+ kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
813
+ elif dataset in ["enwik8", "text8"]:
814
+ pass
815
+
816
+ corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
817
+ torch.save(corpus, fn)
818
+
819
+ return corpus
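A hedged end-to-end sketch of how the pieces in this module fit together; the checkpoint name comes from `PRETRAINED_CORPUS_ARCHIVE_MAP` above, and downloading the corpus is subject to the same deprecation and pickle caveats as the tokenizer:

```python
# the corpus bundles the tokenizer plus pre-encoded train/valid/test tensors
corpus = TransfoXLCorpus.from_pretrained("transfo-xl/transfo-xl-wt103")
train_iter = corpus.get_iterator("train", bsz=32, bptt=128)

for data, target, seq_len in train_iter:
    # data/target are (bsz, seq_len) LongTensors, ready for a Transformer-XL training step
    break
```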
venv/lib/python3.10/site-packages/transformers/models/nezha/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_nezha"] = [
30
+ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "NezhaForNextSentencePrediction",
32
+ "NezhaForMaskedLM",
33
+ "NezhaForPreTraining",
34
+ "NezhaForMultipleChoice",
35
+ "NezhaForQuestionAnswering",
36
+ "NezhaForSequenceClassification",
37
+ "NezhaForTokenClassification",
38
+ "NezhaModel",
39
+ "NezhaPreTrainedModel",
40
+ ]
41
+
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .modeling_nezha import (
53
+ NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
54
+ NezhaForMaskedLM,
55
+ NezhaForMultipleChoice,
56
+ NezhaForNextSentencePrediction,
57
+ NezhaForPreTraining,
58
+ NezhaForQuestionAnswering,
59
+ NezhaForSequenceClassification,
60
+ NezhaForTokenClassification,
61
+ NezhaModel,
62
+ NezhaPreTrainedModel,
63
+ )
64
+
65
+
66
+ else:
67
+ import sys
68
+
69
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
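The `_LazyModule` indirection above means the torch-backed modeling file is only imported when one of its symbols is first accessed. A small illustration, using the import paths this `__init__.py` defines:

```python
from transformers.models.nezha import NezhaConfig       # resolves configuration_nezha only
from transformers.models.nezha import NezhaForMaskedLM  # first access pulls in modeling_nezha (requires torch)

model = NezhaForMaskedLM(NezhaConfig())
```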
venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/configuration_nezha.cpython-310.pyc ADDED
Binary file (4.76 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/modeling_nezha.cpython-310.pyc ADDED
Binary file (49.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nezha/configuration_nezha.py ADDED
@@ -0,0 +1,103 @@
1
+ from ... import PretrainedConfig
2
+ from ..deprecated._archive_maps import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
3
+
4
+
5
+ class NezhaConfig(PretrainedConfig):
6
+ r"""
7
+ This is the configuration class to store the configuration of a [`NezhaModel`]. It is used to instantiate a Nezha
8
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
9
+ defaults will yield a similar configuration to that of the Nezha
10
+ [sijunhe/nezha-cn-base](https://huggingface.co/sijunhe/nezha-cn-base) architecture.
11
+
12
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
13
+ documentation from [`PretrainedConfig`] for more information.
14
+
15
+
16
+ Args:
17
+ vocab_size (`int`, optional, defaults to 21128):
18
+ Vocabulary size of the NEZHA model. Defines the number of different tokens that can be represented by the
19
+ *inputs_ids* passed to the forward method of [`NezhaModel`].
20
+ hidden_size (`int`, optional, defaults to 768):
21
+ Dimensionality of the encoder layers and the pooler layer.
22
+ num_hidden_layers (`int`, optional, defaults to 12):
23
+ Number of hidden layers in the Transformer encoder.
24
+ num_attention_heads (`int`, optional, defaults to 12):
25
+ Number of attention heads for each attention layer in the Transformer encoder.
26
+ intermediate_size (`int`, optional, defaults to 3072):
27
+ The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
28
+ hidden_act (`str` or `function`, optional, defaults to "gelu"):
29
+ The non-linear activation function (function or string) in the encoder and pooler.
30
+ hidden_dropout_prob (`float`, optional, defaults to 0.1):
31
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
32
+ attention_probs_dropout_prob (`float`, optional, defaults to 0.1):
33
+ The dropout ratio for the attention probabilities.
34
+ max_position_embeddings (`int`, optional, defaults to 512):
35
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
36
+ (e.g., 512 or 1024 or 2048).
37
+ type_vocab_size (`int`, optional, defaults to 2):
38
+ The vocabulary size of the *token_type_ids* passed into [`NezhaModel`].
39
+ initializer_range (`float`, optional, defaults to 0.02):
40
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
41
+ layer_norm_eps (`float`, optional, defaults to 1e-12):
42
+ The epsilon used by the layer normalization layers.
43
+ classifier_dropout (`float`, optional, defaults to 0.1):
44
+ The dropout ratio for attached classifiers.
45
+ is_decoder (`bool`, *optional*, defaults to `False`):
46
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
47
+
48
+ Example:
49
+
50
+ ```python
51
+ >>> from transformers import NezhaConfig, NezhaModel
52
+
53
+ >>> # Initializing a Nezha configuration
54
+ >>> configuration = NezhaConfig()
55
+
56
+ >>> # Initializing a model (with random weights) from the Nezha-base style configuration
57
+ >>> model = NezhaModel(configuration)
58
+
59
+ >>> # Accessing the model configuration
60
+ >>> configuration = model.config
61
+ ```"""
62
+
63
+ model_type = "nezha"
64
+
65
+ def __init__(
66
+ self,
67
+ vocab_size=21128,
68
+ hidden_size=768,
69
+ num_hidden_layers=12,
70
+ num_attention_heads=12,
71
+ intermediate_size=3072,
72
+ hidden_act="gelu",
73
+ hidden_dropout_prob=0.1,
74
+ attention_probs_dropout_prob=0.1,
75
+ max_position_embeddings=512,
76
+ max_relative_position=64,
77
+ type_vocab_size=2,
78
+ initializer_range=0.02,
79
+ layer_norm_eps=1e-12,
80
+ classifier_dropout=0.1,
81
+ pad_token_id=0,
82
+ bos_token_id=2,
83
+ eos_token_id=3,
84
+ use_cache=True,
85
+ **kwargs,
86
+ ):
87
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
88
+
89
+ self.vocab_size = vocab_size
90
+ self.hidden_size = hidden_size
91
+ self.num_hidden_layers = num_hidden_layers
92
+ self.num_attention_heads = num_attention_heads
93
+ self.hidden_act = hidden_act
94
+ self.intermediate_size = intermediate_size
95
+ self.hidden_dropout_prob = hidden_dropout_prob
96
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
97
+ self.max_position_embeddings = max_position_embeddings
98
+ self.max_relative_position = max_relative_position
99
+ self.type_vocab_size = type_vocab_size
100
+ self.initializer_range = initializer_range
101
+ self.layer_norm_eps = layer_norm_eps
102
+ self.classifier_dropout = classifier_dropout
103
+ self.use_cache = use_cache
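Note that `max_relative_position` (defaulting to 64 above) is the one Nezha-specific argument not listed in the Args section; it bounds the clipped relative distances consumed by the functional relative-position encoding in the modeling file. A small hedged sketch of overriding it:

```python
from transformers import NezhaConfig, NezhaModel

# widen the relative-position window; every other value keeps the defaults above
config = NezhaConfig(max_relative_position=128)
model = NezhaModel(config)
print(model.config.max_relative_position)  # 128
```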
venv/lib/python3.10/site-packages/transformers/models/nezha/modeling_nezha.py ADDED
@@ -0,0 +1,1693 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Nezha model."""
16
+
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ BaseModelOutputWithPoolingAndCrossAttentions,
33
+ MaskedLMOutput,
34
+ MultipleChoiceModelOutput,
35
+ NextSentencePredictorOutput,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutput,
38
+ TokenClassifierOutput,
39
+ )
40
+ from ...modeling_utils import PreTrainedModel
41
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
42
+ from ...utils import (
43
+ ModelOutput,
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from .configuration_nezha import NezhaConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "sijunhe/nezha-cn-base"
56
+ _CONFIG_FOR_DOC = "NezhaConfig"
57
+
58
+
59
+ from ..deprecated._archive_maps import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
60
+
61
+
62
+ def load_tf_weights_in_nezha(model, config, tf_checkpoint_path):
63
+ """Load tf checkpoints in a pytorch model."""
64
+ try:
65
+ import re
66
+
67
+ import numpy as np
68
+ import tensorflow as tf
69
+ except ImportError:
70
+ logger.error(
71
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
72
+ "https://www.tensorflow.org/install/ for installation instructions."
73
+ )
74
+ raise
75
+ tf_path = os.path.abspath(tf_checkpoint_path)
76
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
77
+ # Load weights from TF model
78
+ init_vars = tf.train.list_variables(tf_path)
79
+ names = []
80
+ arrays = []
81
+ for name, shape in init_vars:
82
+ logger.info(f"Loading TF weight {name} with shape {shape}")
83
+ array = tf.train.load_variable(tf_path, name)
84
+ names.append(name)
85
+ arrays.append(array)
86
+
87
+ for name, array in zip(names, arrays):
88
+ name = name.split("/")
89
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
90
+ # which are not required for using the pretrained model
91
+ if any(
92
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
93
+ for n in name
94
+ ):
95
+ logger.info(f"Skipping {'/'.join(name)}")
96
+ continue
97
+ pointer = model
98
+ for m_name in name:
99
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
100
+ scope_names = re.split(r"_(\d+)", m_name)
101
+ else:
102
+ scope_names = [m_name]
103
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
104
+ pointer = getattr(pointer, "weight")
105
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
106
+ pointer = getattr(pointer, "bias")
107
+ elif scope_names[0] == "output_weights":
108
+ pointer = getattr(pointer, "weight")
109
+ elif scope_names[0] == "squad":
110
+ pointer = getattr(pointer, "classifier")
111
+ else:
112
+ try:
113
+ pointer = getattr(pointer, scope_names[0])
114
+ except AttributeError:
115
+ logger.info(f"Skipping {'/'.join(name)}")
116
+ continue
117
+ if len(scope_names) >= 2:
118
+ num = int(scope_names[1])
119
+ pointer = pointer[num]
120
+ if m_name[-11:] == "_embeddings":
121
+ pointer = getattr(pointer, "weight")
122
+ elif m_name == "kernel":
123
+ array = np.transpose(array)
124
+ try:
125
+ if pointer.shape != array.shape:
126
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
127
+ except ValueError as e:
128
+ e.args += (pointer.shape, array.shape)
129
+ raise
130
+ logger.info(f"Initialize PyTorch weight {name}")
131
+ pointer.data = torch.from_numpy(array)
132
+ return model
133
+
134
+
135
+ class NezhaRelativePositionsEncoding(nn.Module):
136
+ """Implement the Functional Relative Position Encoding"""
137
+
138
+ def __init__(self, length, depth, max_relative_position=127):
139
+ super().__init__()
140
+ vocab_size = max_relative_position * 2 + 1
141
+ range_vec = torch.arange(length)
142
+ range_mat = range_vec.repeat(length).view(length, length)
143
+ distance_mat = range_mat - torch.t(range_mat)
144
+ distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
145
+ final_mat = distance_mat_clipped + max_relative_position
146
+
147
+ embeddings_table = torch.zeros(vocab_size, depth)
148
+ position = torch.arange(0, vocab_size, dtype=torch.int64).float().unsqueeze(1)
149
+ div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
150
+ embeddings_table[:, 0::2] = torch.sin(position * div_term)
151
+ embeddings_table[:, 1::2] = torch.cos(position * div_term)
152
+
153
+ flat_relative_positions_matrix = final_mat.view(-1)
154
+ one_hot_relative_positions_matrix = torch.nn.functional.one_hot(
155
+ flat_relative_positions_matrix, num_classes=vocab_size
156
+ ).float()
157
+ positions_encoding = torch.matmul(one_hot_relative_positions_matrix, embeddings_table)
158
+ my_shape = list(final_mat.size())
159
+ my_shape.append(depth)
160
+ positions_encoding = positions_encoding.view(my_shape)
161
+ self.register_buffer("positions_encoding", positions_encoding, persistent=False)
162
+
163
+ def forward(self, length):
164
+ return self.positions_encoding[:length, :length, :]
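To see what the registered buffer encodes, here is the clipped relative-distance index matrix on its own (the sinusoidal table above is then gathered with these indices); the values assume `length=4` and `max_relative_position=2`:

```python
import torch

length, max_rel = 4, 2
range_vec = torch.arange(length)
distance = range_vec[None, :] - range_vec[:, None]            # distance[i, j] = j - i
clipped = torch.clamp(distance, -max_rel, max_rel) + max_rel  # shifted into [0, 2 * max_rel]

print(clipped)
# tensor([[2, 3, 4, 4],
#         [1, 2, 3, 4],
#         [0, 1, 2, 3],
#         [0, 0, 1, 2]])
```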
165
+
166
+
167
+ class NezhaEmbeddings(nn.Module):
168
+ """Construct the embeddings from word and token_type embeddings."""
169
+
170
+ def __init__(self, config):
171
+ super().__init__()
172
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
173
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
174
+
175
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
176
+ # any TensorFlow checkpoint file
177
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
178
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
179
+ self.register_buffer(
180
+ "token_type_ids", torch.zeros((1, config.max_position_embeddings), dtype=torch.long), persistent=False
181
+ )
182
+
183
+ def forward(
184
+ self,
185
+ input_ids: Optional[torch.LongTensor] = None,
186
+ token_type_ids: Optional[torch.LongTensor] = None,
187
+ inputs_embeds: Optional[torch.FloatTensor] = None,
188
+ ) -> torch.Tensor:
189
+ if input_ids is not None:
190
+ input_shape = input_ids.size()
191
+ else:
192
+ input_shape = inputs_embeds.size()[:-1]
193
+
194
+ seq_length = input_shape[1]
195
+
196
+ if inputs_embeds is None:
197
+ inputs_embeds = self.word_embeddings(input_ids)
198
+
199
+ # Setting the token_type_ids to the registered buffer in the constructor, where it is all zeros. This usually occurs
200
+ # when token_type_ids is auto-generated; the registered buffer helps users trace the model without passing token_type_ids
201
+ # (solves issue #5664).
202
+ if token_type_ids is None:
203
+ if hasattr(self, "token_type_ids"):
204
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
205
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
206
+ token_type_ids = buffered_token_type_ids_expanded
207
+ else:
208
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device)
209
+
210
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
211
+
212
+ embeddings = inputs_embeds + token_type_embeddings
213
+ embeddings = self.LayerNorm(embeddings)
214
+ embeddings = self.dropout(embeddings)
215
+ return embeddings
216
+
217
+
218
+ class NezhaSelfAttention(nn.Module):
219
+ def __init__(self, config):
220
+ super().__init__()
221
+ if config.hidden_size % config.num_attention_heads != 0:
222
+ raise ValueError(
223
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
224
+ f"heads ({config.num_attention_heads})"
225
+ )
226
+
227
+ self.num_attention_heads = config.num_attention_heads
228
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
229
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
230
+
231
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
232
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
233
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
234
+
235
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
236
+ self.relative_positions_encoding = NezhaRelativePositionsEncoding(
237
+ length=config.max_position_embeddings,
238
+ depth=self.attention_head_size,
239
+ max_relative_position=config.max_relative_position,
240
+ )
241
+ self.is_decoder = config.is_decoder
242
+
243
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
244
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
245
+ x = x.view(new_x_shape)
246
+ return x.permute(0, 2, 1, 3)
247
+
248
+ def forward(
249
+ self,
250
+ hidden_states: torch.Tensor,
251
+ attention_mask: Optional[torch.FloatTensor] = None,
252
+ head_mask: Optional[torch.FloatTensor] = None,
253
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
254
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
255
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
256
+ output_attentions: Optional[bool] = False,
257
+ ) -> Tuple[torch.Tensor]:
258
+ mixed_query_layer = self.query(hidden_states)
259
+
260
+ # If this is instantiated as a cross-attention module, the keys
261
+ # and values come from an encoder; the attention mask needs to be
262
+ # such that the encoder's padding tokens are not attended to.
263
+ is_cross_attention = encoder_hidden_states is not None
264
+
265
+ if is_cross_attention and past_key_value is not None:
266
+ # reuse k,v, cross_attentions
267
+ key_layer = past_key_value[0]
268
+ value_layer = past_key_value[1]
269
+ attention_mask = encoder_attention_mask
270
+ elif is_cross_attention:
271
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
272
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
273
+ attention_mask = encoder_attention_mask
274
+ elif past_key_value is not None:
275
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
276
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
277
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
278
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
279
+ else:
280
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
281
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
282
+
283
+ query_layer = self.transpose_for_scores(mixed_query_layer)
284
+
285
+ if self.is_decoder:
286
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
287
+ # Further calls to cross_attention layer can then reuse all cross-attention
288
+ # key/value_states (first "if" case)
289
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
290
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
291
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
292
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
293
+ past_key_value = (key_layer, value_layer)
294
+
295
+ # Take the dot product between "query" and "key" to get the raw attention scores.
296
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
297
+
298
+ batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.size()
299
+ relations_keys = self.relative_positions_encoding(to_seq_length)
300
+ query_layer_t = query_layer.permute(2, 0, 1, 3)
301
+
302
+ query_layer_r = query_layer_t.contiguous().view(
303
+ from_seq_length, batch_size * num_attention_heads, self.attention_head_size
304
+ )
305
+ key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
306
+ key_position_scores_r = key_position_scores.view(
307
+ from_seq_length, batch_size, num_attention_heads, from_seq_length
308
+ )
309
+ key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)
310
+ attention_scores = attention_scores + key_position_scores_r_t
311
+
312
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
313
+
314
+ if attention_mask is not None:
315
+ # Apply the attention mask is (precomputed for all layers in NezhaModel forward() function)
316
+ attention_scores = attention_scores + attention_mask
317
+
318
+ # Normalize the attention scores to probabilities.
319
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
320
+
321
+ # This is actually dropping out entire tokens to attend to, which might
322
+ # seem a bit unusual, but is taken from the original Transformer paper.
323
+ attention_probs = self.dropout(attention_probs)
324
+
325
+ # Mask heads if we want to
326
+ if head_mask is not None:
327
+ attention_probs = attention_probs * head_mask
328
+
329
+ context_layer = torch.matmul(attention_probs, value_layer)
330
+ relations_values = self.relative_positions_encoding(to_seq_length)
331
+ attention_probs_t = attention_probs.permute(2, 0, 1, 3)
332
+ attentions_probs_r = attention_probs_t.contiguous().view(
333
+ from_seq_length, batch_size * num_attention_heads, to_seq_length
334
+ )
335
+ value_position_scores = torch.matmul(attentions_probs_r, relations_values)
336
+ value_position_scores_r = value_position_scores.view(
337
+ from_seq_length, batch_size, num_attention_heads, self.attention_head_size
338
+ )
339
+ value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3)
340
+ context_layer = context_layer + value_position_scores_r_t
341
+
342
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
343
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
344
+ context_layer = context_layer.view(new_context_layer_shape)
345
+
346
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
347
+
348
+ if self.is_decoder:
349
+ outputs = outputs + (past_key_value,)
350
+ return outputs
351
+
352
+
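The block above is the part of Nezha that differs most from BERT: instead of learned absolute position embeddings, functional relative-position scores are folded into the raw attention scores via some reshaping. The following standalone sketch (random tensors and illustrative sizes, not part of the committed file) reproduces that shape bookkeeping and checks that the position scores line up with the attention scores:

import torch

# Illustrative sizes only (assumptions, not values from the file above).
batch, heads, from_seq, to_seq, head_size = 2, 4, 8, 8, 16

query_layer = torch.randn(batch, heads, from_seq, head_size)
attention_scores = torch.randn(batch, heads, from_seq, to_seq)
# Stand-in for self.relative_positions_encoding(to_seq_length): one head_size-dim
# embedding per (query position, key position) pair.
relations_keys = torch.randn(to_seq, to_seq, head_size)

query_layer_t = query_layer.permute(2, 0, 1, 3)                      # (from_seq, batch, heads, head_size)
query_layer_r = query_layer_t.reshape(from_seq, batch * heads, head_size)
key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
key_position_scores_r = key_position_scores.view(from_seq, batch, heads, to_seq)
key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)  # (batch, heads, from_seq, to_seq)

assert key_position_scores_r_t.shape == attention_scores.shape
attention_scores = attention_scores + key_position_scores_r_t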
353
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Nezha
354
+ class NezhaSelfOutput(nn.Module):
355
+ def __init__(self, config):
356
+ super().__init__()
357
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
358
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
359
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
360
+
361
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
362
+ hidden_states = self.dense(hidden_states)
363
+ hidden_states = self.dropout(hidden_states)
364
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
365
+ return hidden_states
366
+
367
+
368
+ class NezhaAttention(nn.Module):
369
+ def __init__(self, config):
370
+ super().__init__()
371
+ self.self = NezhaSelfAttention(config)
372
+ self.output = NezhaSelfOutput(config)
373
+ self.pruned_heads = set()
374
+
375
+ def prune_heads(self, heads):
376
+ if len(heads) == 0:
377
+ return
378
+ heads, index = find_pruneable_heads_and_indices(
379
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
380
+ )
381
+
382
+ # Prune linear layers
383
+ self.self.query = prune_linear_layer(self.self.query, index)
384
+ self.self.key = prune_linear_layer(self.self.key, index)
385
+ self.self.value = prune_linear_layer(self.self.value, index)
386
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
387
+
388
+ # Update hyper params and store pruned heads
389
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
390
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
391
+ self.pruned_heads = self.pruned_heads.union(heads)
392
+
393
+ def forward(
394
+ self,
395
+ hidden_states: torch.Tensor,
396
+ attention_mask: Optional[torch.FloatTensor] = None,
397
+ head_mask: Optional[torch.FloatTensor] = None,
398
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
399
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
400
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
401
+ output_attentions: Optional[bool] = False,
402
+ ) -> Tuple[torch.Tensor]:
403
+ self_outputs = self.self(
404
+ hidden_states,
405
+ attention_mask,
406
+ head_mask,
407
+ encoder_hidden_states,
408
+ encoder_attention_mask,
409
+ past_key_value,
410
+ output_attentions,
411
+ )
412
+ attention_output = self.output(self_outputs[0], hidden_states)
413
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
414
+ return outputs
415
+
416
+
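For context on `prune_heads` above: pruning a head amounts to dropping that head's slice of rows from the query/key/value projections (and the matching columns of the output projection). A minimal sketch with plain torch and hypothetical sizes; the library itself does this through `find_pruneable_heads_and_indices` and `prune_linear_layer`:

import torch
from torch import nn

num_heads, head_size, hidden = 4, 16, 64
query = nn.Linear(hidden, num_heads * head_size)

keep = [h for h in range(num_heads) if h != 2]  # prune head 2
index = torch.cat([torch.arange(h * head_size, (h + 1) * head_size) for h in keep])

pruned_query = nn.Linear(hidden, len(keep) * head_size)
pruned_query.weight.data = query.weight.data[index].clone()
pruned_query.bias.data = query.bias.data[index].clone()

assert pruned_query.weight.shape == (3 * head_size, hidden)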
417
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Nezha
418
+ class NezhaIntermediate(nn.Module):
419
+ def __init__(self, config):
420
+ super().__init__()
421
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
422
+ if isinstance(config.hidden_act, str):
423
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
424
+ else:
425
+ self.intermediate_act_fn = config.hidden_act
426
+
427
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
428
+ hidden_states = self.dense(hidden_states)
429
+ hidden_states = self.intermediate_act_fn(hidden_states)
430
+ return hidden_states
431
+
432
+
433
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Nezha
434
+ class NezhaOutput(nn.Module):
435
+ def __init__(self, config):
436
+ super().__init__()
437
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
438
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
439
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
440
+
441
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
442
+ hidden_states = self.dense(hidden_states)
443
+ hidden_states = self.dropout(hidden_states)
444
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
445
+ return hidden_states
446
+
447
+
448
+ class NezhaLayer(nn.Module):
449
+ def __init__(self, config):
450
+ super().__init__()
451
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
452
+ self.seq_len_dim = 1
453
+ self.attention = NezhaAttention(config)
454
+ self.is_decoder = config.is_decoder
455
+ self.add_cross_attention = config.add_cross_attention
456
+ if self.add_cross_attention:
457
+ if not self.is_decoder:
458
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
459
+ self.crossattention = NezhaAttention(config)
460
+ self.intermediate = NezhaIntermediate(config)
461
+ self.output = NezhaOutput(config)
462
+
463
+ def forward(
464
+ self,
465
+ hidden_states: torch.Tensor,
466
+ attention_mask: Optional[torch.FloatTensor] = None,
467
+ head_mask: Optional[torch.FloatTensor] = None,
468
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
469
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
470
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
471
+ output_attentions: Optional[bool] = False,
472
+ ) -> Tuple[torch.Tensor]:
473
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
474
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
475
+ self_attention_outputs = self.attention(
476
+ hidden_states,
477
+ attention_mask,
478
+ head_mask,
479
+ output_attentions=output_attentions,
480
+ past_key_value=self_attn_past_key_value,
481
+ )
482
+ attention_output = self_attention_outputs[0]
483
+
484
+ # if decoder, the last output is tuple of self-attn cache
485
+ if self.is_decoder:
486
+ outputs = self_attention_outputs[1:-1]
487
+ present_key_value = self_attention_outputs[-1]
488
+ else:
489
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
490
+
491
+ cross_attn_present_key_value = None
492
+ if self.is_decoder and encoder_hidden_states is not None:
493
+ if not hasattr(self, "crossattention"):
494
+ raise ValueError(
495
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
496
+ " by setting `config.add_cross_attention=True`"
497
+ )
498
+
499
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
500
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
501
+ cross_attention_outputs = self.crossattention(
502
+ attention_output,
503
+ attention_mask,
504
+ head_mask,
505
+ encoder_hidden_states,
506
+ encoder_attention_mask,
507
+ cross_attn_past_key_value,
508
+ output_attentions,
509
+ )
510
+ attention_output = cross_attention_outputs[0]
511
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
512
+
513
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
514
+ cross_attn_present_key_value = cross_attention_outputs[-1]
515
+ present_key_value = present_key_value + cross_attn_present_key_value
516
+
517
+ layer_output = apply_chunking_to_forward(
518
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
519
+ )
520
+ outputs = (layer_output,) + outputs
521
+
522
+ # if decoder, return the attn key/values as the last output
523
+ if self.is_decoder:
524
+ outputs = outputs + (present_key_value,)
525
+
526
+ return outputs
527
+
528
+ def feed_forward_chunk(self, attention_output):
529
+ intermediate_output = self.intermediate(attention_output)
530
+ layer_output = self.output(intermediate_output, attention_output)
531
+ return layer_output
532
+
533
+
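`apply_chunking_to_forward` above trades speed for memory by running the position-wise feed-forward block on sequence slices and concatenating the results. A rough sketch of the idea with a toy tensor and a stand-in feed-forward function (not the real helper used above):

import torch

def feed_forward_chunk(x):
    # Stand-in for intermediate + output projections; position-wise, so chunking
    # along the sequence dimension does not change the result.
    return x * 2.0

attention_output = torch.randn(2, 12, 32)  # (batch, seq_len, hidden)
chunk_size = 4

chunks = attention_output.split(chunk_size, dim=1)
layer_output = torch.cat([feed_forward_chunk(c) for c in chunks], dim=1)

assert torch.allclose(layer_output, feed_forward_chunk(attention_output))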
534
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Nezha
535
+ class NezhaEncoder(nn.Module):
536
+ def __init__(self, config):
537
+ super().__init__()
538
+ self.config = config
539
+ self.layer = nn.ModuleList([NezhaLayer(config) for _ in range(config.num_hidden_layers)])
540
+ self.gradient_checkpointing = False
541
+
542
+ def forward(
543
+ self,
544
+ hidden_states: torch.Tensor,
545
+ attention_mask: Optional[torch.FloatTensor] = None,
546
+ head_mask: Optional[torch.FloatTensor] = None,
547
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
548
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
549
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
550
+ use_cache: Optional[bool] = None,
551
+ output_attentions: Optional[bool] = False,
552
+ output_hidden_states: Optional[bool] = False,
553
+ return_dict: Optional[bool] = True,
554
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
555
+ all_hidden_states = () if output_hidden_states else None
556
+ all_self_attentions = () if output_attentions else None
557
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
558
+
559
+ if self.gradient_checkpointing and self.training:
560
+ if use_cache:
561
+ logger.warning_once(
562
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
563
+ )
564
+ use_cache = False
565
+
566
+ next_decoder_cache = () if use_cache else None
567
+ for i, layer_module in enumerate(self.layer):
568
+ if output_hidden_states:
569
+ all_hidden_states = all_hidden_states + (hidden_states,)
570
+
571
+ layer_head_mask = head_mask[i] if head_mask is not None else None
572
+ past_key_value = past_key_values[i] if past_key_values is not None else None
573
+
574
+ if self.gradient_checkpointing and self.training:
575
+ layer_outputs = self._gradient_checkpointing_func(
576
+ layer_module.__call__,
577
+ hidden_states,
578
+ attention_mask,
579
+ layer_head_mask,
580
+ encoder_hidden_states,
581
+ encoder_attention_mask,
582
+ past_key_value,
583
+ output_attentions,
584
+ )
585
+ else:
586
+ layer_outputs = layer_module(
587
+ hidden_states,
588
+ attention_mask,
589
+ layer_head_mask,
590
+ encoder_hidden_states,
591
+ encoder_attention_mask,
592
+ past_key_value,
593
+ output_attentions,
594
+ )
595
+
596
+ hidden_states = layer_outputs[0]
597
+ if use_cache:
598
+ next_decoder_cache += (layer_outputs[-1],)
599
+ if output_attentions:
600
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
601
+ if self.config.add_cross_attention:
602
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
603
+
604
+ if output_hidden_states:
605
+ all_hidden_states = all_hidden_states + (hidden_states,)
606
+
607
+ if not return_dict:
608
+ return tuple(
609
+ v
610
+ for v in [
611
+ hidden_states,
612
+ next_decoder_cache,
613
+ all_hidden_states,
614
+ all_self_attentions,
615
+ all_cross_attentions,
616
+ ]
617
+ if v is not None
618
+ )
619
+ return BaseModelOutputWithPastAndCrossAttentions(
620
+ last_hidden_state=hidden_states,
621
+ past_key_values=next_decoder_cache,
622
+ hidden_states=all_hidden_states,
623
+ attentions=all_self_attentions,
624
+ cross_attentions=all_cross_attentions,
625
+ )
626
+
627
+
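A quick sketch (made-up sizes) of the cache structure `NezhaEncoder` accumulates when `use_cache=True`: one `(key, value)` pair per layer, each grown along dim 2 on the next decoding step.

import torch

num_layers, batch, heads, cached_len, head_size = 3, 2, 4, 5, 16
next_decoder_cache = tuple(
    (torch.randn(batch, heads, cached_len, head_size),
     torch.randn(batch, heads, cached_len, head_size))
    for _ in range(num_layers)
)

# On the next step each layer concatenates its freshly projected key/value on dim=2.
new_key = torch.randn(batch, heads, 1, head_size)
grown_key = torch.cat([next_decoder_cache[0][0], new_key], dim=2)
assert grown_key.shape == (batch, heads, cached_len + 1, head_size)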
628
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Nezha
629
+ class NezhaPooler(nn.Module):
630
+ def __init__(self, config):
631
+ super().__init__()
632
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
633
+ self.activation = nn.Tanh()
634
+
635
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
636
+ # We "pool" the model by simply taking the hidden state corresponding
637
+ # to the first token.
638
+ first_token_tensor = hidden_states[:, 0]
639
+ pooled_output = self.dense(first_token_tensor)
640
+ pooled_output = self.activation(pooled_output)
641
+ return pooled_output
642
+
643
+
644
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Nezha
645
+ class NezhaPredictionHeadTransform(nn.Module):
646
+ def __init__(self, config):
647
+ super().__init__()
648
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
649
+ if isinstance(config.hidden_act, str):
650
+ self.transform_act_fn = ACT2FN[config.hidden_act]
651
+ else:
652
+ self.transform_act_fn = config.hidden_act
653
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
654
+
655
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
656
+ hidden_states = self.dense(hidden_states)
657
+ hidden_states = self.transform_act_fn(hidden_states)
658
+ hidden_states = self.LayerNorm(hidden_states)
659
+ return hidden_states
660
+
661
+
662
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Nezha
663
+ class NezhaLMPredictionHead(nn.Module):
664
+ def __init__(self, config):
665
+ super().__init__()
666
+ self.transform = NezhaPredictionHeadTransform(config)
667
+
668
+ # The output weights are the same as the input embeddings, but there is
669
+ # an output-only bias for each token.
670
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
671
+
672
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
673
+
674
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
675
+ self.decoder.bias = self.bias
676
+
677
+ def forward(self, hidden_states):
678
+ hidden_states = self.transform(hidden_states)
679
+ hidden_states = self.decoder(hidden_states)
680
+ return hidden_states
681
+
682
+
683
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Nezha
684
+ class NezhaOnlyMLMHead(nn.Module):
685
+ def __init__(self, config):
686
+ super().__init__()
687
+ self.predictions = NezhaLMPredictionHead(config)
688
+
689
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
690
+ prediction_scores = self.predictions(sequence_output)
691
+ return prediction_scores
692
+
693
+
694
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Nezha
695
+ class NezhaOnlyNSPHead(nn.Module):
696
+ def __init__(self, config):
697
+ super().__init__()
698
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
699
+
700
+ def forward(self, pooled_output):
701
+ seq_relationship_score = self.seq_relationship(pooled_output)
702
+ return seq_relationship_score
703
+
704
+
705
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Nezha
706
+ class NezhaPreTrainingHeads(nn.Module):
707
+ def __init__(self, config):
708
+ super().__init__()
709
+ self.predictions = NezhaLMPredictionHead(config)
710
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
711
+
712
+ def forward(self, sequence_output, pooled_output):
713
+ prediction_scores = self.predictions(sequence_output)
714
+ seq_relationship_score = self.seq_relationship(pooled_output)
715
+ return prediction_scores, seq_relationship_score
716
+
717
+
718
+ class NezhaPreTrainedModel(PreTrainedModel):
719
+ """
720
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
721
+ models.
722
+ """
723
+
724
+ config_class = NezhaConfig
725
+ load_tf_weights = load_tf_weights_in_nezha
726
+ base_model_prefix = "nezha"
727
+ supports_gradient_checkpointing = True
728
+
729
+ def _init_weights(self, module):
730
+ """Initialize the weights"""
731
+ if isinstance(module, nn.Linear):
732
+ # Slightly different from the TF version which uses truncated_normal for initialization
733
+ # cf https://github.com/pytorch/pytorch/pull/5617
734
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
735
+ if module.bias is not None:
736
+ module.bias.data.zero_()
737
+ elif isinstance(module, nn.Embedding):
738
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
739
+ if module.padding_idx is not None:
740
+ module.weight.data[module.padding_idx].zero_()
741
+ elif isinstance(module, nn.LayerNorm):
742
+ module.bias.data.zero_()
743
+ module.weight.data.fill_(1.0)
744
+
745
+
746
+ @dataclass
747
+ class NezhaForPreTrainingOutput(ModelOutput):
748
+ """
749
+ Output type of [`NezhaForPreTraining`].
750
+
751
+ Args:
752
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
753
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
754
+ (classification) loss.
755
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
756
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
757
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
758
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
759
+ before SoftMax).
760
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
761
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
762
+ shape `(batch_size, sequence_length, hidden_size)`.
763
+
764
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
765
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
766
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
767
+ sequence_length)`.
768
+
769
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
770
+ heads.
771
+ """
772
+
773
+ loss: Optional[torch.FloatTensor] = None
774
+ prediction_logits: torch.FloatTensor = None
775
+ seq_relationship_logits: torch.FloatTensor = None
776
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
777
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
778
+
779
+
780
+ NEZHA_START_DOCSTRING = r"""
781
+
782
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
783
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
784
+ etc.)
785
+
786
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
787
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
788
+ and behavior.
789
+
790
+ Parameters:
791
+ config ([`NezhaConfig`]): Model configuration class with all the parameters of the model.
792
+ Initializing with a config file does not load the weights associated with the model, only the
793
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
794
+ """
795
+
796
+ NEZHA_INPUTS_DOCSTRING = r"""
797
+ Args:
798
+ input_ids (`torch.LongTensor` of shape `({0})`):
799
+ Indices of input sequence tokens in the vocabulary.
800
+
801
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
802
+ [`PreTrainedTokenizer.__call__`] for details.
803
+
804
+ [What are input IDs?](../glossary#input-ids)
805
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
806
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
807
+
808
+ - 1 for tokens that are **not masked**,
809
+ - 0 for tokens that are **masked**.
810
+
811
+ [What are attention masks?](../glossary#attention-mask)
812
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
813
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
814
+ 1]`:
815
+
816
+ - 0 corresponds to a *sentence A* token,
817
+ - 1 corresponds to a *sentence B* token.
818
+
819
+ [What are token type IDs?](../glossary#token-type-ids)
820
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
821
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
822
+
823
+ - 1 indicates the head is **not masked**,
824
+ - 0 indicates the head is **masked**.
825
+
826
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
827
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
828
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
829
+ model's internal embedding lookup matrix.
830
+ output_attentions (`bool`, *optional*):
831
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
832
+ tensors for more detail.
833
+ output_hidden_states (`bool`, *optional*):
834
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
835
+ more detail.
836
+ return_dict (`bool`, *optional*):
837
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
838
+ """
839
+
840
+
841
+ @add_start_docstrings(
842
+ "The bare Nezha Model transformer outputting raw hidden-states without any specific head on top.",
843
+ NEZHA_START_DOCSTRING,
844
+ )
845
+ class NezhaModel(NezhaPreTrainedModel):
846
+ """
847
+
848
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
849
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
850
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
851
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
852
+
853
+ To behave as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set
854
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
855
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
856
+ """
857
+
858
+ def __init__(self, config, add_pooling_layer=True):
859
+ super().__init__(config)
860
+ self.config = config
861
+
862
+ self.embeddings = NezhaEmbeddings(config)
863
+ self.encoder = NezhaEncoder(config)
864
+
865
+ self.pooler = NezhaPooler(config) if add_pooling_layer else None
866
+
867
+ # Initialize weights and apply final processing
868
+ self.post_init()
869
+
870
+ def get_input_embeddings(self):
871
+ return self.embeddings.word_embeddings
872
+
873
+ def set_input_embeddings(self, value):
874
+ self.embeddings.word_embeddings = value
875
+
876
+ def _prune_heads(self, heads_to_prune):
877
+ """
878
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
879
+ class PreTrainedModel
880
+ """
881
+ for layer, heads in heads_to_prune.items():
882
+ self.encoder.layer[layer].attention.prune_heads(heads)
883
+
884
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
885
+ @add_code_sample_docstrings(
886
+ checkpoint=_CHECKPOINT_FOR_DOC,
887
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
888
+ config_class=_CONFIG_FOR_DOC,
889
+ )
890
+ def forward(
891
+ self,
892
+ input_ids: Optional[torch.Tensor] = None,
893
+ attention_mask: Optional[torch.Tensor] = None,
894
+ token_type_ids: Optional[torch.Tensor] = None,
895
+ head_mask: Optional[torch.Tensor] = None,
896
+ inputs_embeds: Optional[torch.Tensor] = None,
897
+ encoder_hidden_states: Optional[torch.Tensor] = None,
898
+ encoder_attention_mask: Optional[torch.Tensor] = None,
899
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
900
+ use_cache: Optional[bool] = None,
901
+ output_attentions: Optional[bool] = None,
902
+ output_hidden_states: Optional[bool] = None,
903
+ return_dict: Optional[bool] = None,
904
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
905
+ r"""
906
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
907
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
908
+ the model is configured as a decoder.
909
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
910
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
911
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
912
+
913
+ - 1 for tokens that are **not masked**,
914
+ - 0 for tokens that are **masked**.
915
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
916
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
917
+
918
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
919
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
920
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
921
+ use_cache (`bool`, *optional*):
922
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
923
+ `past_key_values`).
924
+ """
925
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
926
+ output_hidden_states = (
927
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
928
+ )
929
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
930
+
931
+ if self.config.is_decoder:
932
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
933
+ else:
934
+ use_cache = False
935
+
936
+ if input_ids is not None and inputs_embeds is not None:
937
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
938
+ elif input_ids is not None:
939
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
940
+ input_shape = input_ids.size()
941
+ elif inputs_embeds is not None:
942
+ input_shape = inputs_embeds.size()[:-1]
943
+ else:
944
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
945
+
946
+ batch_size, seq_length = input_shape
947
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
948
+
949
+ # past_key_values_length
950
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
951
+
952
+ if attention_mask is None:
953
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
954
+
955
+ if token_type_ids is None:
956
+ if hasattr(self.embeddings, "token_type_ids"):
957
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
958
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
959
+ token_type_ids = buffered_token_type_ids_expanded
960
+ else:
961
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
962
+
963
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
964
+ # ourselves in which case we just need to make it broadcastable to all heads.
965
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
966
+
967
+ # If a 2D or 3D attention mask is provided for the cross-attention
968
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
969
+ if self.config.is_decoder and encoder_hidden_states is not None:
970
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
971
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
972
+ if encoder_attention_mask is None:
973
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
974
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
975
+ else:
976
+ encoder_extended_attention_mask = None
977
+
978
+ # Prepare head mask if needed
979
+ # 1.0 in head_mask indicates we keep the head
980
+ # attention_probs has shape bsz x n_heads x N x N
981
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
982
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
983
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
984
+
985
+ embedding_output = self.embeddings(
986
+ input_ids=input_ids,
987
+ token_type_ids=token_type_ids,
988
+ inputs_embeds=inputs_embeds,
989
+ )
990
+ encoder_outputs = self.encoder(
991
+ embedding_output,
992
+ attention_mask=extended_attention_mask,
993
+ head_mask=head_mask,
994
+ encoder_hidden_states=encoder_hidden_states,
995
+ encoder_attention_mask=encoder_extended_attention_mask,
996
+ past_key_values=past_key_values,
997
+ use_cache=use_cache,
998
+ output_attentions=output_attentions,
999
+ output_hidden_states=output_hidden_states,
1000
+ return_dict=return_dict,
1001
+ )
1002
+ sequence_output = encoder_outputs[0]
1003
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1004
+
1005
+ if not return_dict:
1006
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1007
+
1008
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1009
+ last_hidden_state=sequence_output,
1010
+ pooler_output=pooled_output,
1011
+ past_key_values=encoder_outputs.past_key_values,
1012
+ hidden_states=encoder_outputs.hidden_states,
1013
+ attentions=encoder_outputs.attentions,
1014
+ cross_attentions=encoder_outputs.cross_attentions,
1015
+ )
1016
+
1017
+
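A hedged usage sketch for the bare model (the checkpoint name is the one used in this file's docstrings; output fields follow `BaseModelOutputWithPoolingAndCrossAttentions`):

import torch
from transformers import AutoTokenizer, NezhaModel

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (batch_size, hidden_size)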
1018
+ @add_start_docstrings(
1019
+ """
1020
+ Nezha Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
1021
+ sentence prediction (classification)` head.
1022
+ """,
1023
+ NEZHA_START_DOCSTRING,
1024
+ )
1025
+ class NezhaForPreTraining(NezhaPreTrainedModel):
1026
+ _tied_weights_keys = ["cls.predictions.decoder"]
1027
+
1028
+ def __init__(self, config):
1029
+ super().__init__(config)
1030
+
1031
+ self.nezha = NezhaModel(config)
1032
+ self.cls = NezhaPreTrainingHeads(config)
1033
+
1034
+ # Initialize weights and apply final processing
1035
+ self.post_init()
1036
+
1037
+ def get_output_embeddings(self):
1038
+ return self.cls.predictions.decoder
1039
+
1040
+ def set_output_embeddings(self, new_embeddings):
1041
+ self.cls.predictions.decoder = new_embeddings
1042
+
1043
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1044
+ @replace_return_docstrings(output_type=NezhaForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1045
+ def forward(
1046
+ self,
1047
+ input_ids: Optional[torch.Tensor] = None,
1048
+ attention_mask: Optional[torch.Tensor] = None,
1049
+ token_type_ids: Optional[torch.Tensor] = None,
1050
+ head_mask: Optional[torch.Tensor] = None,
1051
+ inputs_embeds: Optional[torch.Tensor] = None,
1052
+ labels: Optional[torch.Tensor] = None,
1053
+ next_sentence_label: Optional[torch.Tensor] = None,
1054
+ output_attentions: Optional[bool] = None,
1055
+ output_hidden_states: Optional[bool] = None,
1056
+ return_dict: Optional[bool] = None,
1057
+ ) -> Union[Tuple[torch.Tensor], NezhaForPreTrainingOutput]:
1058
+ r"""
1059
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1060
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1061
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
1062
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1063
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1064
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
1065
+ pair (see the `input_ids` docstring). Indices should be in `[0, 1]`:
1066
+
1067
+ - 0 indicates sequence B is a continuation of sequence A,
1068
+ - 1 indicates sequence B is a random sequence.
1069
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1070
+ Used to hide legacy arguments that have been deprecated.
1071
+
1072
+ Returns:
1073
+
1074
+ Example:
1075
+
1076
+ ```python
1077
+ >>> from transformers import AutoTokenizer, NezhaForPreTraining
1078
+ >>> import torch
1079
+
1080
+ >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
1081
+ >>> model = NezhaForPreTraining.from_pretrained("sijunhe/nezha-cn-base")
1082
+
1083
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1084
+ >>> outputs = model(**inputs)
1085
+
1086
+ >>> prediction_logits = outputs.prediction_logits
1087
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1088
+ ```
1089
+ """
1090
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1091
+
1092
+ outputs = self.nezha(
1093
+ input_ids,
1094
+ attention_mask=attention_mask,
1095
+ token_type_ids=token_type_ids,
1096
+ head_mask=head_mask,
1097
+ inputs_embeds=inputs_embeds,
1098
+ output_attentions=output_attentions,
1099
+ output_hidden_states=output_hidden_states,
1100
+ return_dict=return_dict,
1101
+ )
1102
+
1103
+ sequence_output, pooled_output = outputs[:2]
1104
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1105
+
1106
+ total_loss = None
1107
+ if labels is not None and next_sentence_label is not None:
1108
+ loss_fct = CrossEntropyLoss()
1109
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1110
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1111
+ total_loss = masked_lm_loss + next_sentence_loss
1112
+
1113
+ if not return_dict:
1114
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1115
+ return ((total_loss,) + output) if total_loss is not None else output
1116
+
1117
+ return NezhaForPreTrainingOutput(
1118
+ loss=total_loss,
1119
+ prediction_logits=prediction_scores,
1120
+ seq_relationship_logits=seq_relationship_score,
1121
+ hidden_states=outputs.hidden_states,
1122
+ attentions=outputs.attentions,
1123
+ )
1124
+
1125
+
1126
+ @add_start_docstrings("""Nezha Model with a `language modeling` head on top.""", NEZHA_START_DOCSTRING)
1127
+ class NezhaForMaskedLM(NezhaPreTrainedModel):
1128
+ _tied_weights_keys = ["cls.predictions.decoder"]
1129
+
1130
+ def __init__(self, config):
1131
+ super().__init__(config)
1132
+
1133
+ if config.is_decoder:
1134
+ logger.warning(
1135
+ "If you want to use `NezhaForMaskedLM` make sure `config.is_decoder=False` for "
1136
+ "bi-directional self-attention."
1137
+ )
1138
+
1139
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
1140
+ self.cls = NezhaOnlyMLMHead(config)
1141
+
1142
+ # Initialize weights and apply final processing
1143
+ self.post_init()
1144
+
1145
+ def get_output_embeddings(self):
1146
+ return self.cls.predictions.decoder
1147
+
1148
+ def set_output_embeddings(self, new_embeddings):
1149
+ self.cls.predictions.decoder = new_embeddings
1150
+
1151
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1152
+ @add_code_sample_docstrings(
1153
+ checkpoint=_CHECKPOINT_FOR_DOC,
1154
+ output_type=MaskedLMOutput,
1155
+ config_class=_CONFIG_FOR_DOC,
1156
+ )
1157
+ def forward(
1158
+ self,
1159
+ input_ids: Optional[torch.Tensor] = None,
1160
+ attention_mask: Optional[torch.Tensor] = None,
1161
+ token_type_ids: Optional[torch.Tensor] = None,
1162
+ head_mask: Optional[torch.Tensor] = None,
1163
+ inputs_embeds: Optional[torch.Tensor] = None,
1164
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1165
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1166
+ labels: Optional[torch.Tensor] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1171
+ r"""
1172
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1173
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1174
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1175
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1176
+ """
1177
+
1178
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1179
+
1180
+ outputs = self.nezha(
1181
+ input_ids,
1182
+ attention_mask=attention_mask,
1183
+ token_type_ids=token_type_ids,
1184
+ head_mask=head_mask,
1185
+ inputs_embeds=inputs_embeds,
1186
+ encoder_hidden_states=encoder_hidden_states,
1187
+ encoder_attention_mask=encoder_attention_mask,
1188
+ output_attentions=output_attentions,
1189
+ output_hidden_states=output_hidden_states,
1190
+ return_dict=return_dict,
1191
+ )
1192
+
1193
+ sequence_output = outputs[0]
1194
+ prediction_scores = self.cls(sequence_output)
1195
+
1196
+ masked_lm_loss = None
1197
+ if labels is not None:
1198
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1199
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1200
+
1201
+ if not return_dict:
1202
+ output = (prediction_scores,) + outputs[2:]
1203
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1204
+
1205
+ return MaskedLMOutput(
1206
+ loss=masked_lm_loss,
1207
+ logits=prediction_scores,
1208
+ hidden_states=outputs.hidden_states,
1209
+ attentions=outputs.attentions,
1210
+ )
1211
+
1212
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1213
+ input_shape = input_ids.shape
1214
+ effective_batch_size = input_shape[0]
1215
+
1216
+ # add a dummy token
1217
+ if self.config.pad_token_id is None:
1218
+ raise ValueError("The PAD token should be defined for generation")
1219
+
1220
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1221
+ dummy_token = torch.full(
1222
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1223
+ )
1224
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1225
+
1226
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1227
+
1228
+
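For `NezhaForMaskedLM`, a minimal fill-mask sketch (the example sentence is made up, and the checkpoint is Chinese, so the prediction for English text is not expected to be meaningful):

import torch
from transformers import AutoTokenizer, NezhaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")

inputs = tokenizer(f"Paris is the {tokenizer.mask_token} of France.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Find the masked position and decode the highest-scoring vocabulary token there.
mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))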
1229
+ @add_start_docstrings(
1230
+ """Nezha Model with a `next sentence prediction (classification)` head on top.""",
1231
+ NEZHA_START_DOCSTRING,
1232
+ )
1233
+ class NezhaForNextSentencePrediction(NezhaPreTrainedModel):
1234
+ def __init__(self, config):
1235
+ super().__init__(config)
1236
+
1237
+ self.nezha = NezhaModel(config)
1238
+ self.cls = NezhaOnlyNSPHead(config)
1239
+
1240
+ # Initialize weights and apply final processing
1241
+ self.post_init()
1242
+
1243
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1244
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1245
+ def forward(
1246
+ self,
1247
+ input_ids: Optional[torch.Tensor] = None,
1248
+ attention_mask: Optional[torch.Tensor] = None,
1249
+ token_type_ids: Optional[torch.Tensor] = None,
1250
+ head_mask: Optional[torch.Tensor] = None,
1251
+ inputs_embeds: Optional[torch.Tensor] = None,
1252
+ labels: Optional[torch.Tensor] = None,
1253
+ output_attentions: Optional[bool] = None,
1254
+ output_hidden_states: Optional[bool] = None,
1255
+ return_dict: Optional[bool] = None,
1256
+ **kwargs,
1257
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
1258
+ r"""
1259
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1260
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1261
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1262
+
1263
+ - 0 indicates sequence B is a continuation of sequence A,
1264
+ - 1 indicates sequence B is a random sequence.
1265
+
1266
+ Returns:
1267
+
1268
+ Example:
1269
+
1270
+ ```python
1271
+ >>> from transformers import AutoTokenizer, NezhaForNextSentencePrediction
1272
+ >>> import torch
1273
+
1274
+ >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
1275
+ >>> model = NezhaForNextSentencePrediction.from_pretrained("sijunhe/nezha-cn-base")
1276
+
1277
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1278
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1279
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1280
+
1281
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1282
+ >>> logits = outputs.logits
1283
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1284
+ ```
1285
+ """
1286
+
1287
+ if "next_sentence_label" in kwargs:
1288
+ warnings.warn(
1289
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1290
+ " `labels` instead.",
1291
+ FutureWarning,
1292
+ )
1293
+ labels = kwargs.pop("next_sentence_label")
1294
+
1295
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1296
+
1297
+ outputs = self.nezha(
1298
+ input_ids,
1299
+ attention_mask=attention_mask,
1300
+ token_type_ids=token_type_ids,
1301
+ head_mask=head_mask,
1302
+ inputs_embeds=inputs_embeds,
1303
+ output_attentions=output_attentions,
1304
+ output_hidden_states=output_hidden_states,
1305
+ return_dict=return_dict,
1306
+ )
1307
+
1308
+ pooled_output = outputs[1]
1309
+
1310
+ seq_relationship_scores = self.cls(pooled_output)
1311
+
1312
+ next_sentence_loss = None
1313
+ if labels is not None:
1314
+ loss_fct = CrossEntropyLoss()
1315
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1316
+
1317
+ if not return_dict:
1318
+ output = (seq_relationship_scores,) + outputs[2:]
1319
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1320
+
1321
+ return NextSentencePredictorOutput(
1322
+ loss=next_sentence_loss,
1323
+ logits=seq_relationship_scores,
1324
+ hidden_states=outputs.hidden_states,
1325
+ attentions=outputs.attentions,
1326
+ )
1327
+
1328
+
1329
+ @add_start_docstrings(
1330
+ """
1331
+ Nezha Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1332
+ output) e.g. for GLUE tasks.
1333
+ """,
1334
+ NEZHA_START_DOCSTRING,
1335
+ )
1336
+ class NezhaForSequenceClassification(NezhaPreTrainedModel):
1337
+ def __init__(self, config):
1338
+ super().__init__(config)
1339
+ self.num_labels = config.num_labels
1340
+ self.config = config
1341
+
1342
+ self.nezha = NezhaModel(config)
1343
+ classifier_dropout = (
1344
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1345
+ )
1346
+ self.dropout = nn.Dropout(classifier_dropout)
1347
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1348
+
1349
+ # Initialize weights and apply final processing
1350
+ self.post_init()
1351
+
1352
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1353
+ @add_code_sample_docstrings(
1354
+ checkpoint=_CHECKPOINT_FOR_DOC,
1355
+ output_type=SequenceClassifierOutput,
1356
+ config_class=_CONFIG_FOR_DOC,
1357
+ )
1358
+ def forward(
1359
+ self,
1360
+ input_ids: Optional[torch.Tensor] = None,
1361
+ attention_mask: Optional[torch.Tensor] = None,
1362
+ token_type_ids: Optional[torch.Tensor] = None,
1363
+ head_mask: Optional[torch.Tensor] = None,
1364
+ inputs_embeds: Optional[torch.Tensor] = None,
1365
+ labels: Optional[torch.Tensor] = None,
1366
+ output_attentions: Optional[bool] = None,
1367
+ output_hidden_states: Optional[bool] = None,
1368
+ return_dict: Optional[bool] = None,
1369
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1370
+ r"""
1371
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1372
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1373
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1374
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1375
+ """
1376
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1377
+
1378
+ outputs = self.nezha(
1379
+ input_ids,
1380
+ attention_mask=attention_mask,
1381
+ token_type_ids=token_type_ids,
1382
+ head_mask=head_mask,
1383
+ inputs_embeds=inputs_embeds,
1384
+ output_attentions=output_attentions,
1385
+ output_hidden_states=output_hidden_states,
1386
+ return_dict=return_dict,
1387
+ )
1388
+
1389
+ pooled_output = outputs[1]
1390
+
1391
+ pooled_output = self.dropout(pooled_output)
1392
+ logits = self.classifier(pooled_output)
1393
+
1394
+ loss = None
1395
+ if labels is not None:
1396
+ if self.config.problem_type is None:
1397
+ if self.num_labels == 1:
1398
+ self.config.problem_type = "regression"
1399
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1400
+ self.config.problem_type = "single_label_classification"
1401
+ else:
1402
+ self.config.problem_type = "multi_label_classification"
1403
+
1404
+ if self.config.problem_type == "regression":
1405
+ loss_fct = MSELoss()
1406
+ if self.num_labels == 1:
1407
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1408
+ else:
1409
+ loss = loss_fct(logits, labels)
1410
+ elif self.config.problem_type == "single_label_classification":
1411
+ loss_fct = CrossEntropyLoss()
1412
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1413
+ elif self.config.problem_type == "multi_label_classification":
1414
+ loss_fct = BCEWithLogitsLoss()
1415
+ loss = loss_fct(logits, labels)
1416
+ if not return_dict:
1417
+ output = (logits,) + outputs[2:]
1418
+ return ((loss,) + output) if loss is not None else output
1419
+
1420
+ return SequenceClassifierOutput(
1421
+ loss=loss,
1422
+ logits=logits,
1423
+ hidden_states=outputs.hidden_states,
1424
+ attentions=outputs.attentions,
1425
+ )
1426
+
1427
+
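The `problem_type` logic above picks one of three losses. A standalone sketch with toy tensors showing the pairing (regression -> `MSELoss`, single-label -> `CrossEntropyLoss`, multi-label -> `BCEWithLogitsLoss`):

import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

logits = torch.randn(4, 3)  # (batch_size, num_labels)

regression_loss = MSELoss()(torch.randn(4, 1).squeeze(), torch.randn(4))
single_label_loss = CrossEntropyLoss()(logits, torch.tensor([0, 2, 1, 1]))
multi_label_loss = BCEWithLogitsLoss()(logits, torch.randint(0, 2, (4, 3)).float())

print(regression_loss.item(), single_label_loss.item(), multi_label_loss.item())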
1428
+ @add_start_docstrings(
1429
+ """
1430
+ Nezha Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1431
+ softmax) e.g. for RocStories/SWAG tasks.
1432
+ """,
1433
+ NEZHA_START_DOCSTRING,
1434
+ )
1435
+ class NezhaForMultipleChoice(NezhaPreTrainedModel):
1436
+ def __init__(self, config):
1437
+ super().__init__(config)
1438
+
1439
+ self.nezha = NezhaModel(config)
1440
+ classifier_dropout = (
1441
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1442
+ )
1443
+ self.dropout = nn.Dropout(classifier_dropout)
1444
+ self.classifier = nn.Linear(config.hidden_size, 1)
1445
+
1446
+ # Initialize weights and apply final processing
1447
+ self.post_init()
1448
+
1449
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1450
+ @add_code_sample_docstrings(
1451
+ checkpoint=_CHECKPOINT_FOR_DOC,
1452
+ output_type=MultipleChoiceModelOutput,
1453
+ config_class=_CONFIG_FOR_DOC,
1454
+ )
1455
+ def forward(
1456
+ self,
1457
+ input_ids: Optional[torch.Tensor] = None,
1458
+ attention_mask: Optional[torch.Tensor] = None,
1459
+ token_type_ids: Optional[torch.Tensor] = None,
1460
+ head_mask: Optional[torch.Tensor] = None,
1461
+ inputs_embeds: Optional[torch.Tensor] = None,
1462
+ labels: Optional[torch.Tensor] = None,
1463
+ output_attentions: Optional[bool] = None,
1464
+ output_hidden_states: Optional[bool] = None,
1465
+ return_dict: Optional[bool] = None,
1466
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1467
+ r"""
1468
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1469
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1470
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1471
+ `input_ids` above)
1472
+ """
1473
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1474
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1475
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1476
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1477
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1478
+ inputs_embeds = (
1479
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1480
+ if inputs_embeds is not None
1481
+ else None
1482
+ )
1483
+
1484
+ outputs = self.nezha(
1485
+ input_ids,
1486
+ attention_mask=attention_mask,
1487
+ token_type_ids=token_type_ids,
1488
+ head_mask=head_mask,
1489
+ inputs_embeds=inputs_embeds,
1490
+ output_attentions=output_attentions,
1491
+ output_hidden_states=output_hidden_states,
1492
+ return_dict=return_dict,
1493
+ )
1494
+
1495
+ pooled_output = outputs[1]
1496
+ pooled_output = self.dropout(pooled_output)
1497
+ logits = self.classifier(pooled_output)
1498
+ reshaped_logits = logits.view(-1, num_choices)
1502
+
1503
+ loss = None
1504
+ if labels is not None:
1505
+ loss_fct = CrossEntropyLoss()
1506
+ loss = loss_fct(reshaped_logits, labels)
1507
+
1508
+ if not return_dict:
1509
+ output = (reshaped_logits,) + outputs[2:]
1510
+ return ((loss,) + output) if loss is not None else output
1511
+
1512
+ return MultipleChoiceModelOutput(
1513
+ loss=loss,
1514
+ logits=reshaped_logits,
1515
+ hidden_states=outputs.hidden_states,
1516
+ attentions=outputs.attentions,
1517
+ )
1518
+
1519
+
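A toy sketch (random tensors, no real model) of the multiple-choice reshaping used above: `(batch, num_choices, seq_len)` inputs are flattened, scored with a 1-unit classifier on the pooled output, then folded back into `(batch, num_choices)` logits for `CrossEntropyLoss`:

import torch
from torch import nn

batch, num_choices, seq_len, hidden = 2, 4, 10, 32
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

flat_ids = input_ids.view(-1, input_ids.size(-1))  # (batch * num_choices, seq_len)
pooled = torch.randn(flat_ids.size(0), hidden)     # stand-in for the model's pooled output
logits = nn.Linear(hidden, 1)(pooled)              # (batch * num_choices, 1)
reshaped_logits = logits.view(-1, num_choices)     # (batch, num_choices)

loss = nn.CrossEntropyLoss()(reshaped_logits, torch.tensor([1, 3]))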
1520
+ @add_start_docstrings(
1521
+ """
1522
+ Nezha Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1523
+ Named-Entity-Recognition (NER) tasks.
1524
+ """,
1525
+ NEZHA_START_DOCSTRING,
1526
+ )
1527
+ class NezhaForTokenClassification(NezhaPreTrainedModel):
1528
+ def __init__(self, config):
1529
+ super().__init__(config)
1530
+ self.num_labels = config.num_labels
1531
+
1532
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
1533
+ classifier_dropout = (
1534
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1535
+ )
1536
+ self.dropout = nn.Dropout(classifier_dropout)
1537
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1538
+
1539
+ # Initialize weights and apply final processing
1540
+ self.post_init()
1541
+
1542
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1543
+ @add_code_sample_docstrings(
1544
+ checkpoint=_CHECKPOINT_FOR_DOC,
1545
+ output_type=TokenClassifierOutput,
1546
+ config_class=_CONFIG_FOR_DOC,
1547
+ )
1548
+ def forward(
1549
+ self,
1550
+ input_ids: Optional[torch.Tensor] = None,
1551
+ attention_mask: Optional[torch.Tensor] = None,
1552
+ token_type_ids: Optional[torch.Tensor] = None,
1553
+ head_mask: Optional[torch.Tensor] = None,
1554
+ inputs_embeds: Optional[torch.Tensor] = None,
1555
+ labels: Optional[torch.Tensor] = None,
1556
+ output_attentions: Optional[bool] = None,
1557
+ output_hidden_states: Optional[bool] = None,
1558
+ return_dict: Optional[bool] = None,
1559
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1560
+ r"""
1561
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1562
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1563
+ """
1564
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1565
+
1566
+ outputs = self.nezha(
1567
+ input_ids,
1568
+ attention_mask=attention_mask,
1569
+ token_type_ids=token_type_ids,
1570
+ head_mask=head_mask,
1571
+ inputs_embeds=inputs_embeds,
1572
+ output_attentions=output_attentions,
1573
+ output_hidden_states=output_hidden_states,
1574
+ return_dict=return_dict,
1575
+ )
1576
+
1577
+ sequence_output = outputs[0]
1578
+
1579
+ sequence_output = self.dropout(sequence_output)
1580
+ logits = self.classifier(sequence_output)
1581
+
1582
+ loss = None
1583
+ if labels is not None:
1584
+ loss_fct = CrossEntropyLoss()
1585
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1586
+
1587
+ if not return_dict:
1588
+ output = (logits,) + outputs[2:]
1589
+ return ((loss,) + output) if loss is not None else output
1590
+
1591
+ return TokenClassifierOutput(
1592
+ loss=loss,
1593
+ logits=logits,
1594
+ hidden_states=outputs.hidden_states,
1595
+ attentions=outputs.attentions,
1596
+ )
1597
+
1598
+
1599
+ @add_start_docstrings(
1600
+ """
1601
+ Nezha Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
1602
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1603
+ """,
1604
+ NEZHA_START_DOCSTRING,
1605
+ )
1606
+ class NezhaForQuestionAnswering(NezhaPreTrainedModel):
1607
+ def __init__(self, config):
1608
+ super().__init__(config)
1609
+ self.num_labels = config.num_labels
1610
+
1611
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
1612
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1613
+
1614
+ # Initialize weights and apply final processing
1615
+ self.post_init()
1616
+
1617
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1618
+ @add_code_sample_docstrings(
1619
+ checkpoint=_CHECKPOINT_FOR_DOC,
1620
+ output_type=QuestionAnsweringModelOutput,
1621
+ config_class=_CONFIG_FOR_DOC,
1622
+ )
1623
+ def forward(
1624
+ self,
1625
+ input_ids: Optional[torch.Tensor] = None,
1626
+ attention_mask: Optional[torch.Tensor] = None,
1627
+ token_type_ids: Optional[torch.Tensor] = None,
1628
+ head_mask: Optional[torch.Tensor] = None,
1629
+ inputs_embeds: Optional[torch.Tensor] = None,
1630
+ start_positions: Optional[torch.Tensor] = None,
1631
+ end_positions: Optional[torch.Tensor] = None,
1632
+ output_attentions: Optional[bool] = None,
1633
+ output_hidden_states: Optional[bool] = None,
1634
+ return_dict: Optional[bool] = None,
1635
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1636
+ r"""
1637
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1638
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1639
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1640
+ are not taken into account for computing the loss.
1641
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1642
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1643
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1644
+ are not taken into account for computing the loss.
1645
+ """
1646
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1647
+
1648
+ outputs = self.nezha(
1649
+ input_ids,
1650
+ attention_mask=attention_mask,
1651
+ token_type_ids=token_type_ids,
1652
+ head_mask=head_mask,
1653
+ inputs_embeds=inputs_embeds,
1654
+ output_attentions=output_attentions,
1655
+ output_hidden_states=output_hidden_states,
1656
+ return_dict=return_dict,
1657
+ )
1658
+
1659
+ sequence_output = outputs[0]
1660
+
1661
+ logits = self.qa_outputs(sequence_output)
1662
+ start_logits, end_logits = logits.split(1, dim=-1)
1663
+ start_logits = start_logits.squeeze(-1).contiguous()
1664
+ end_logits = end_logits.squeeze(-1).contiguous()
1665
+
1666
+ total_loss = None
1667
+ if start_positions is not None and end_positions is not None:
1668
+ # If we are on multi-GPU, split adds a dimension
1669
+ if len(start_positions.size()) > 1:
1670
+ start_positions = start_positions.squeeze(-1)
1671
+ if len(end_positions.size()) > 1:
1672
+ end_positions = end_positions.squeeze(-1)
1673
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1674
+ ignored_index = start_logits.size(1)
1675
+ start_positions = start_positions.clamp(0, ignored_index)
1676
+ end_positions = end_positions.clamp(0, ignored_index)
1677
+
1678
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1679
+ start_loss = loss_fct(start_logits, start_positions)
1680
+ end_loss = loss_fct(end_logits, end_positions)
1681
+ total_loss = (start_loss + end_loss) / 2
1682
+
1683
+ if not return_dict:
1684
+ output = (start_logits, end_logits) + outputs[2:]
1685
+ return ((total_loss,) + output) if total_loss is not None else output
1686
+
1687
+ return QuestionAnsweringModelOutput(
1688
+ loss=total_loss,
1689
+ start_logits=start_logits,
1690
+ end_logits=end_logits,
1691
+ hidden_states=outputs.hidden_states,
1692
+ attentions=outputs.attentions,
1693
+ )
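
For readers skimming the diff: the QA head above projects each token's final hidden state to two scores and splits them into start/end logits. The snippet below is a minimal, self-contained sketch (not part of the committed file) of that projection plus a simple greedy span decode; the batch size, sequence length, hidden size, and random inputs are illustrative assumptions, not real Nezha output.

# Illustrative sketch of span-logit computation and greedy decoding.
# Toy shapes and random tensors stand in for real model hidden states.
import torch
from torch import nn

batch_size, seq_len, hidden_size = 1, 16, 768           # assumed toy dimensions
sequence_output = torch.randn(batch_size, seq_len, hidden_size)

qa_outputs = nn.Linear(hidden_size, 2)                   # mirrors config.num_labels == 2
logits = qa_outputs(sequence_output)                     # (batch, seq_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)                  # (batch, seq_len)
end_logits = end_logits.squeeze(-1)

# Greedy decode: pick the (start, end) pair with the highest summed score, with start <= end.
pair_scores = start_logits.unsqueeze(-1) + end_logits.unsqueeze(1)   # (batch, seq_len, seq_len)
valid = torch.triu(torch.ones(seq_len, seq_len)).bool()              # keep start <= end
pair_scores = pair_scores.squeeze(0).masked_fill(~valid, float("-inf"))
start_idx, end_idx = divmod(int(pair_scores.argmax()), seq_len)
print(f"predicted answer span: tokens {start_idx}..{end_idx}")
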
venv/lib/python3.10/site-packages/transformers/models/opt/__init__.py ADDED
@@ -0,0 +1,101 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_tf_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_opt"] = [
+         "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "OPTForCausalLM",
+         "OPTModel",
+         "OPTPreTrainedModel",
+         "OPTForSequenceClassification",
+         "OPTForQuestionAnswering",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flax_opt"] = [
+         "FlaxOPTForCausalLM",
+         "FlaxOPTModel",
+         "FlaxOPTPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_opt import (
+             OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             OPTForCausalLM,
+             OPTForQuestionAnswering,
+             OPTForSequenceClassification,
+             OPTModel,
+             OPTPreTrainedModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
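
The file above follows the repository's lazy-import pattern: `_import_structure` maps submodules to their public names, and at runtime the package module is replaced by a `_LazyModule` so heavy backends (torch/TF/Flax) are only imported when one of those names is actually accessed. The snippet below is a simplified, hypothetical illustration of the same idea using PEP 562 module-level `__getattr__`; it is not the transformers implementation, and the attribute/submodule names are reused only as examples.

# Simplified illustration of lazy submodule imports (conceptually similar to _LazyModule).
# Hypothetical sketch: meant to live in a package's __init__.py, not taken from transformers itself.
import importlib

# Public name -> submodule that actually defines it.
_LAZY_ATTRS = {
    "OPTConfig": "configuration_opt",   # cheap, config-only module
    "OPTModel": "modeling_opt",         # heavy module; importing it pulls in torch
}


def __getattr__(name):
    # Called only when `name` is not already defined, so submodules load on first access.
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(f".{_LAZY_ATTRS[name]}", __name__)
        value = getattr(submodule, name)
        globals()[name] = value          # cache so later lookups skip __getattr__
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
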
venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/configuration_opt.cpython-310.pyc ADDED
Binary file (5.39 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/convert_opt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (2.58 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_flax_opt.cpython-310.pyc ADDED
Binary file (21.4 kB). View file